==== python-elasticsearch-9.1.1/.buildkite/Dockerfile ====

ARG PYTHON_VERSION=3.13

FROM python:${PYTHON_VERSION}

# Default UID/GID to 1000
# it can be overridden at build time
ARG BUILDER_UID=1000
ARG BUILDER_GID=1000

ENV BUILDER_USER elastic
ENV BUILDER_GROUP elastic

ENV PATH="${PATH}:/var/lib/elastic/.local/bin"

# Create user
RUN groupadd --system -g ${BUILDER_GID} ${BUILDER_GROUP} \
    && useradd --system --shell /bin/bash -u ${BUILDER_UID} -g ${BUILDER_GROUP} -d /var/lib/elastic -m elastic 1>/dev/null 2>/dev/null \
    && mkdir -p /code/elasticsearch-py && mkdir /code/elasticsearch-py/build \
    && chown -R ${BUILDER_USER}:${BUILDER_GROUP} /code/

WORKDIR /code/elasticsearch-py
USER ${BUILDER_USER}:${BUILDER_GROUP}

RUN python -m pip install --disable-pip-version-check nox
COPY --chown=$BUILDER_USER:$BUILDER_GROUP . .

==== python-elasticsearch-9.1.1/.buildkite/certs/README.md ====

# CI certificates

This directory contains certificates that can be used to test against Elasticsearch in CI.

## Generating new certificates using the Certificate Authority cert and key

Before adding support for Python 3.13, we generated certificates with
[`elasticsearch-certutil`](https://www.elastic.co/guide/en/elasticsearch/reference/current/certutil.html).
However, those certificates are not compliant with RFC 5280, and Python now
enforces compliance by enabling the VERIFY_X509_STRICT flag by default.

If you need to generate new certificates, you can do so with
[trustme](https://trustme.readthedocs.io/en/latest/) as follows:

```bash
pip install trustme
python -m trustme --identities instance
# Use the filenames expected by our tests
mv client.pem ca.crt
mv server.pem testnode.crt
mv server.key testnode.key
```

For more control over the generated certificates, trustme also offers a Python API, but we have not needed it so far.
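Should we ever need that control, here is a minimal sketch of the equivalent
generation through trustme's Python API. This is illustrative only, not
something the test suite uses today; the identity `instance` and the output
filenames simply mirror the CLI commands above:

```python
# Hedged sketch: generate a CA plus a server certificate for the hostname
# "instance" using trustme's documented Python API.
import trustme

ca = trustme.CA()
server_cert = ca.issue_cert("instance")

# Write the files under the names our tests expect.
ca.cert_pem.write_to_path("ca.crt")
server_cert.private_key_pem.write_to_path("testnode.key")
server_cert.cert_chain_pems[0].write_to_path("testnode.crt")
```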
==== python-elasticsearch-9.1.1/.buildkite/certs/ca.crt ====

-----BEGIN CERTIFICATE-----
MIIByTCCAW+gAwIBAgIUIYClYWXiTsB8aMrEEMrzdrk5rOswCgYIKoZIzj0EAwIw
QDEXMBUGA1UECgwOdHJ1c3RtZSB2MS4yLjAxJTAjBgNVBAsMHFRlc3RpbmcgQ0Eg
I2JpdzFXYzEwbHBxQ0ZRTDUwIBcNMDAwMTAxMDAwMDAwWhgPMzAwMDAxMDEwMDAw
MDBaMEAxFzAVBgNVBAoMDnRydXN0bWUgdjEuMi4wMSUwIwYDVQQLDBxUZXN0aW5n
IENBICNiaXcxV2MxMGxwcUNGUUw1MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE
SN7++A76LmOR0tKKra1M6VVzGUljjL9fVPxOEIblOOJJhA7mKLQguNzEHjucNV23
LcDzMX/M/oUBGdYZBbAv4qNFMEMwHQYDVR0OBBYEFCrGGcO9v0UAWSsD93P/x2MT
NiJbMBIGA1UdEwEB/wQIMAYBAf8CAQkwDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49
BAMCA0gAMEUCIQDGyO21zIAwmARtoc2atVmmqZdPVkegHkCKCFY4P+KeEAIgKMCz
aU8LPCVyA+ZF9K+tcqkNK5h/5s7wlQ5DSeKSuE8=
-----END CERTIFICATE-----

==== python-elasticsearch-9.1.1/.buildkite/certs/testnode.crt ====

-----BEGIN CERTIFICATE-----
MIICKzCCAdKgAwIBAgIUZeLIKR7XTP5Gx/moiuzcWcfHaSswCgYIKoZIzj0EAwIw
QDEXMBUGA1UECgwOdHJ1c3RtZSB2MS4yLjAxJTAjBgNVBAsMHFRlc3RpbmcgQ0Eg
I2JpdzFXYzEwbHBxQ0ZRTDUwIBcNMDAwMTAxMDAwMDAwWhgPMzAwMDAxMDEwMDAw
MDBaMEIxFzAVBgNVBAoMDnRydXN0bWUgdjEuMi4wMScwJQYDVQQLDB5UZXN0aW5n
IGNlcnQgIzNPWkpxTWh0WmxrNGlDMm0wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
AASp6UadRZ0ZP3F2KeEkIUOf0B8GOTX55B91RO/PLUQb26wZcWmHGPOJ0HAy9F2E
Y+rJ1zDUnfB5msowei/iuoaMo4GlMIGiMB0GA1UdDgQWBBSP5z3h8b13ul407YOd
kyjKNcf/vTAMBgNVHRMBAf8EAjAAMB8GA1UdIwQYMBaAFCrGGcO9v0UAWSsD93P/
x2MTNiJbMBYGA1UdEQEB/wQMMAqCCGluc3RhbmNlMA4GA1UdDwEB/wQEAwIFoDAq
BgNVHSUBAf8EIDAeBggrBgEFBQcDAgYIKwYBBQUHAwEGCCsGAQUFBwMDMAoGCCqG
SM49BAMCA0cAMEQCIHPP7chQolK+N+GZ+rJ49euoTSzb2YIU5vnCY/bFEWO+AiBC
OTFYhR9Mw/e+WdJVZO78XZYKy5uA28JwsZuu7E0kZA==
-----END CERTIFICATE-----

==== python-elasticsearch-9.1.1/.buildkite/certs/testnode.key ====

-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIN+K8+F47YchiH+7gA8KBG8u35PWcOJN+Fszv8TPEEpdoAoGCCqGSM49
AwEHoUQDQgAEqelGnUWdGT9xdinhJCFDn9AfBjk1+eQfdUTvzy1EG9usGXFphxjz
idBwMvRdhGPqydcw1J3weZrKMHov4rqGjA==
-----END EC PRIVATE KEY-----

==== python-elasticsearch-9.1.1/.buildkite/functions/cleanup.sh ====

#!/usr/bin/env bash
#
# Shared cleanup routines between different steps
#
# Please source .buildkite/functions/imports.sh as a whole, not just this file
#
# Version 1.0.0
# - Initial version after refactor

function cleanup_volume {
  if [[ "$(docker volume ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing volume $1\033[0m"
    (docker volume rm "$1") || true
  fi
}
function container_running {
  if [[ "$(docker ps -q -f name=$1)" ]]; then
    return 0;
  else
    return 1;
  fi
}
function cleanup_node {
  if container_running "$1"; then
    echo -e "\033[34;1mINFO:\033[0m Removing container $1\033[0m"
    (docker container rm --force --volumes "$1") || true
  fi
  if [[ -n "$1" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing volume $1-${suffix}-data\033[0m"
    cleanup_volume "$1-${suffix}-data"
  fi
}
function cleanup_network {
  if [[ "$(docker network ls -q -f name=$1)" ]]; then
    echo -e "\033[34;1mINFO:\033[0m Removing network $1\033[0m"
    (docker network rm "$1") || true
  fi
}

function cleanup_trap {
  status=$?
  set +x
  if [[ "$DETACH" != "true" ]]; then
    echo -e "\033[34;1mINFO:\033[0m clean the network if not detached (start and exit)\033[0m"
    cleanup_all_in_network "$1"
  fi
  # status is 0 or SIGINT
  if [[ "$status" == "0" || "$status" == "130" ]]; then
    echo -e "\n\033[32;1mSUCCESS run-tests\033[0m"
    exit 0
  else
    echo -e "\n\033[31;1mFAILURE during run-tests\033[0m"
    exit ${status}
  fi
};
function cleanup_all_in_network {
  if [[ -z "$(docker network ls -q -f name="^$1\$")" ]]; then
    echo -e "\033[34;1mINFO:\033[0m $1 is already deleted\033[0m"
    return 0
  fi
  containers=$(docker network inspect -f '{{ range $key, $value := .Containers }}{{ printf "%s\n" .Name}}{{ end }}' $1)
  while read -r container; do
    cleanup_node "$container"
  done <<< "$containers"
  cleanup_network $1
  echo -e "\033[32;1mSUCCESS:\033[0m Cleaned up and exiting\033[0m"
};

==== python-elasticsearch-9.1.1/.buildkite/functions/imports.sh ====

#!/usr/bin/env bash
#
# Sets up all the common variables and imports relevant functions
#
# Version 1.0.1
# - Initial version after refactor
# - Validate STACK_VERSION asap

function require_stack_version() {
  if [[ -z $STACK_VERSION ]]; then
    echo -e "\033[31;1mERROR:\033[0m Required environment variable [STACK_VERSION] not set\033[0m"
    exit 1
  fi
}

require_stack_version

if [[ -z $es_node_name ]]; then
  # only set these once
  set -euo pipefail
  export TEST_SUITE=${TEST_SUITE-platinum}
  export RUNSCRIPTS=${RUNSCRIPTS-}
  export DETACH=${DETACH-false}
  export CLEANUP=${CLEANUP-false}

  export es_node_name=instance
  export elastic_password=changeme
  export elasticsearch_image=elasticsearch
  export elasticsearch_url=https://elastic:${elastic_password}@${es_node_name}:9200
  if [[ $TEST_SUITE != "platinum" ]]; then
    export elasticsearch_url=http://${es_node_name}:9200
  fi
  export external_elasticsearch_url=${elasticsearch_url/$es_node_name/localhost}
  export elasticsearch_container="${elasticsearch_image}:${STACK_VERSION}"

  export suffix=rest-test
  export moniker=$(echo "$elasticsearch_container" | tr -C "[:alnum:]" '-')
  export network_name=${moniker}${suffix}

  export ssl_cert="${script_path}/certs/testnode.crt"
  export ssl_key="${script_path}/certs/testnode.key"
  export ssl_ca="${script_path}/certs/ca.crt"
fi

export script_path=$(dirname $(realpath -s $0))
source $script_path/functions/cleanup.sh
source $script_path/functions/wait-for-container.sh
trap "cleanup_trap ${network_name}" EXIT

if [[ "$CLEANUP" == "true" ]]; then
  cleanup_all_in_network $network_name
  exit 0
fi

echo -e "\033[34;1mINFO:\033[0m Creating network $network_name if it does not exist already \033[0m"
docker network inspect "$network_name" > /dev/null 2>&1 || docker network create "$network_name"

==== python-elasticsearch-9.1.1/.buildkite/functions/wait-for-container.sh ====

#!/usr/bin/env bash
#
# Exposes a routine scripts can call to wait for a container if that container set up a health command
#
# Please source .buildkite/functions/imports.sh as a whole, not just this file
#
# Version 1.0.1
# - Initial version after refactor
# - Make sure wait_for_container is silent

function wait_for_container {
  set +x
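  # Added explanatory comment: keep polling until the container either stops
  # running or its Docker health status has moved past "starting".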
  until ! container_running "$1" || (container_running "$1" && [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "starting" ]]); do
    echo ""
    docker inspect -f "{{range .State.Health.Log}}{{.Output}}{{end}}" ${1}
    echo -e "\033[34;1mINFO:\033[0m waiting for node $1 to be up\033[0m"
    sleep 2;
  done;

  # Always show logs if the container is running, this is very useful both on CI as well as while developing
  if container_running $1; then
    docker logs $1
  fi

  if ! container_running $1 || [[ "$(docker inspect -f "{{.State.Health.Status}}" ${1})" != "healthy" ]]; then
    cleanup_all_in_network $2
    echo
    echo -e "\033[31;1mERROR:\033[0m Failed to start $1 in detached mode beyond health checks\033[0m"
    echo -e "\033[31;1mERROR:\033[0m dumped the docker log before shutting the node down\033[0m"
    return 1
  else
    echo
    echo -e "\033[32;1mSUCCESS:\033[0m Detached and healthy: ${1} on docker network: ${network_name}\033[0m"
    return 0
  fi
}

==== python-elasticsearch-9.1.1/.buildkite/pipeline.yml ====

steps:
  - label: ":elasticsearch: :python: ES Python {{ matrix.python }} {{ matrix.nox_session }} ({{ matrix.connection }})"
    agents:
      provider: "gcp"
    env:
      PYTHON_VERSION: "{{ matrix.python }}"
      TEST_SUITE: "platinum"
      STACK_VERSION: "9.1.0-SNAPSHOT"
      PYTHON_CONNECTION_CLASS: "{{ matrix.connection }}"
      NOX_SESSION: "{{ matrix.nox_session }}"
    matrix:
      setup:
        python:
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
          - "3.13"
        connection:
          - "urllib3"
          - "requests"
        nox_session:
          - "test"
      adjustments:
        - with:
            python: "3.9"
            connection: "urllib3"
            nox_session: "test_otel"
        - with:
            python: "3.13"
            connection: "urllib3"
            nox_session: "test_otel"
    command: ./.buildkite/run-tests

==== python-elasticsearch-9.1.1/.buildkite/pull-requests.json ====

{
  "jobs": [
    {
      "enabled": true,
      "pipeline_slug": "elasticsearch-py-integration-tests",
      "allow_org_users": true
    },
    {
      "enabled": true,
      "pipeline_slug": "docs-build-pr",
      "allow_org_users": true
    }
  ]
}

==== python-elasticsearch-9.1.1/.buildkite/run-elasticsearch.sh ====

#!/usr/bin/env bash
#
# Launch one or more Elasticsearch nodes via the Docker image,
# to form a cluster suitable for running the REST API tests.
#
# Export the STACK_VERSION variable, eg. '8.0.0-SNAPSHOT'.
# Export the TEST_SUITE variable, eg. 'free' or 'platinum'; defaults to 'free'.
# Export the NUMBER_OF_NODES variable to start more than 1 node

# Version 1.6.0
# - Initial version of the run-elasticsearch.sh script
# - Deleting the volume should not depend on the container still running
# - Fixed `ES_JAVA_OPTS` config
# - Moved to STACK_VERSION and TEST_VERSION
# - Refactored into functions and imports
# - Support NUMBER_OF_NODES
# - Added 5 retries on docker pull for fixing transient network errors
# - Added flags to make local CCR configurations work
# - Added action.destructive_requires_name=false as the default will be true in v8
# - Added ingest.geoip.downloader.enabled=false as it causes false positives in testing
# - Moved ELASTIC_PASSWORD and xpack.security.enabled to the base arguments for "Security On by default"
# - Use https only when TEST_SUITE is "platinum", when "free" use http

script_path=$(dirname $(realpath -s $0))
source $script_path/functions/imports.sh
set -euo pipefail

echo -e "\033[34;1mINFO:\033[0m Take down node if called twice with the same arguments (DETACH=true) or on separate terminals \033[0m"
cleanup_node $es_node_name

master_node_name=${es_node_name}
cluster_name=${moniker}${suffix}

BUILDKITE=${BUILDKITE-false}
# Set vm.max_map_count kernel setting to 262144 if we're in CI
if [[ "$BUILDKITE" == "true" ]]; then
  sudo sysctl -w vm.max_map_count=262144
fi

declare -a volumes
environment=($(cat <<-END
  --env ELASTIC_PASSWORD=$elastic_password
  --env xpack.security.enabled=true
  --env node.name=$es_node_name
  --env cluster.name=$cluster_name
  --env cluster.initial_master_nodes=$master_node_name
  --env discovery.seed_hosts=$master_node_name
  --env cluster.routing.allocation.disk.threshold_enabled=false
  --env bootstrap.memory_lock=true
  --env node.attr.testattr=test
  --env path.repo=/tmp
  --env repositories.url.allowed_urls=http://snapshot.test*
  --env action.destructive_requires_name=false
  --env ingest.geoip.downloader.enabled=false
  --env cluster.deprecation_indexing.enabled=false
END
))
if [[ "$TEST_SUITE" == "platinum" ]]; then
  environment+=($(cat <<-END
    --env xpack.license.self_generated.type=trial
    --env xpack.security.http.ssl.enabled=true
    --env xpack.security.http.ssl.verification_mode=certificate
    --env xpack.security.http.ssl.key=certs/testnode.key
    --env xpack.security.http.ssl.certificate=certs/testnode.crt
    --env xpack.security.http.ssl.certificate_authorities=certs/ca.crt
    --env xpack.security.transport.ssl.enabled=true
    --env xpack.security.transport.ssl.verification_mode=certificate
    --env xpack.security.transport.ssl.key=certs/testnode.key
    --env xpack.security.transport.ssl.certificate=certs/testnode.crt
    --env xpack.security.transport.ssl.certificate_authorities=certs/ca.crt
END
))
  volumes+=($(cat <<-END
    --volume $ssl_cert:/usr/share/elasticsearch/config/certs/testnode.crt
    --volume $ssl_key:/usr/share/elasticsearch/config/certs/testnode.key
    --volume $ssl_ca:/usr/share/elasticsearch/config/certs/ca.crt
END
))
else
  environment+=($(cat <<-END
    --env xpack.security.http.ssl.enabled=false
END
))
fi

cert_validation_flags=""
if [[ "$TEST_SUITE" == "platinum" ]]; then
  cert_validation_flags="--insecure --cacert /usr/share/elasticsearch/config/certs/ca.crt --resolve ${es_node_name}:443:127.0.0.1"
fi

# Pull the container, retry on failures up to 5 times with
# short delays between each attempt. Fixes most transient network errors.
docker_pull_attempts=0
until [ "$docker_pull_attempts" -ge 5 ]
do
  docker pull docker.elastic.co/elasticsearch/"$elasticsearch_container" && break
  docker_pull_attempts=$((docker_pull_attempts+1))
  echo "Failed to pull image, retrying in 10 seconds (retry $docker_pull_attempts/5)..."
  sleep 10
done

NUMBER_OF_NODES=${NUMBER_OF_NODES-1}
http_port=9200
for (( i=0; i<$NUMBER_OF_NODES; i++, http_port++ )); do
  node_name=${es_node_name}$i
  node_url=${external_elasticsearch_url/9200/${http_port}}$i
  if [[ "$i" == "0" ]]; then node_name=$es_node_name; fi
  environment+=($(cat <<-END
    --env node.name=$node_name
END
))
  echo "$i: $http_port $node_url "
  volume_name=${node_name}-${suffix}-data
  volumes+=($(cat <<-END
    --volume $volume_name:/usr/share/elasticsearch/data${i}
END
))

  # make sure we detach for all but the last node if DETACH=false (default) so all nodes are started
  local_detach="true"
  if [[ "$i" == "$((NUMBER_OF_NODES-1))" ]]; then local_detach=$DETACH; fi

  echo -e "\033[34;1mINFO:\033[0m Starting container $node_name \033[0m"
  set -x
  docker run \
    -u "$(id -u)" \
    --name "$node_name" \
    --network "$network_name" \
    --env "ES_JAVA_OPTS=-Xms1g -Xmx1g -da:org.elasticsearch.xpack.ccr.index.engine.FollowingEngineAssertions" \
    "${environment[@]}" \
    "${volumes[@]}" \
    --publish "$http_port":9200 \
    --ulimit nofile=65536:65536 \
    --ulimit memlock=-1:-1 \
    --detach="$local_detach" \
    --health-cmd="curl $cert_validation_flags --fail $elasticsearch_url/_cluster/health || exit 1" \
    --health-interval=2s \
    --health-retries=20 \
    --health-timeout=2s \
    --rm \
    docker.elastic.co/elasticsearch/"$elasticsearch_container";
  set +x

  if wait_for_container "$es_node_name" "$network_name"; then
    echo -e "\033[32;1mSUCCESS:\033[0m Running on: $node_url\033[0m"
  fi
done

==== python-elasticsearch-9.1.1/.buildkite/run-nox.sh ====

#!/bin/bash

if [[ -z "$NOX_SESSION" ]]; then
  NOX_SESSION=test-${PYTHON_VERSION%-dev}
fi

nox -s $NOX_SESSION

==== python-elasticsearch-9.1.1/.buildkite/run-repository.sh ====

#!/usr/bin/env bash

# Called by the entry point `run-tests`; use this script to add your repository-specific test commands.
# Once called, Elasticsearch is up and running, and the following parameters are available to this script:

# ELASTICSEARCH_VERSION -- version, e.g. Major.Minor.Patch(-Prelease)
# ELASTICSEARCH_CONTAINER -- the docker moniker as a reference to know which docker image distribution is used
# ELASTICSEARCH_URL -- The url at which elasticsearch is reachable
# NETWORK_NAME -- The docker network name
# NODE_NAME -- The docker container name also used as Elasticsearch node name

# When run in CI the test-matrix is used to define additional variables
# TEST_SUITE -- either `oss` or `xpack`, defaults to `oss` in `run-tests`

set -e

echo -e "\033[34;1mINFO:\033[0m URL ${ELASTICSEARCH_URL}\033[0m"
echo -e "\033[34;1mINFO:\033[0m VERSION ${ELASTICSEARCH_VERSION}\033[0m"
echo -e "\033[34;1mINFO:\033[0m CONTAINER ${ELASTICSEARCH_CONTAINER}\033[0m"
echo -e "\033[34;1mINFO:\033[0m TEST_SUITE ${TEST_SUITE}\033[0m"
echo -e "\033[34;1mINFO:\033[0m NOX_SESSION ${NOX_SESSION}\033[0m"
echo -e "\033[34;1mINFO:\033[0m PYTHON_VERSION ${PYTHON_VERSION}\033[0m"
echo -e "\033[34;1mINFO:\033[0m PYTHON_CONNECTION_CLASS ${PYTHON_CONNECTION_CLASS}\033[0m"

echo -e "\033[1m>>>>> Build [elastic/elasticsearch-py container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"

docker build \
  --file .buildkite/Dockerfile \
  --tag elastic/elasticsearch-py \
  --build-arg "PYTHON_VERSION=${PYTHON_VERSION}" \
  --build-arg "BUILDER_UID=$(id -u)" \
  --build-arg "BUILDER_GID=$(id -g)" \
  .

echo -e "\033[1m>>>>> Run [elastic/elasticsearch-py container] >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"

mkdir -p junit
docker run \
  -u "$(id -u):$(id -g)" \
  --network=${network_name} \
  --env "STACK_VERSION=${STACK_VERSION}" \
  --env "ELASTICSEARCH_URL=${elasticsearch_url}" \
  --env "TEST_SUITE=${TEST_SUITE}" \
  --env "PYTHON_CONNECTION_CLASS=${PYTHON_CONNECTION_CLASS}" \
  --env "TEST_TYPE=server" \
  --env "FORCE_COLOR=1" \
  --name elasticsearch-py \
  --rm \
  elastic/elasticsearch-py \
  nox -s ${NOX_SESSION}-${PYTHON_VERSION}

==== python-elasticsearch-9.1.1/.buildkite/run-tests ====

#!/usr/bin/env bash
#
# Version 1.1
# - Moved to .ci folder and separated out `run-repository.sh`
# - Add `$RUNSCRIPTS` env var for running Elasticsearch-dependent products

# Default environment variables
export STACK_VERSION="${STACK_VERSION:=8.0.0-SNAPSHOT}"
export TEST_SUITE="${TEST_SUITE:=platinum}"
export PYTHON_VERSION="${PYTHON_VERSION:=3.13}"
export PYTHON_CONNECTION_CLASS="${PYTHON_CONNECTION_CLASS:=urllib3}"

script_path=$(dirname $(realpath -s $0))
source $script_path/functions/imports.sh
set -euo pipefail

echo "--- :elasticsearch: Starting Elasticsearch"
DETACH=true bash $script_path/run-elasticsearch.sh

if [[ -n "$RUNSCRIPTS" ]]; then
  for RUNSCRIPT in ${RUNSCRIPTS//,/ }; do
    echo -e "\033[1m>>>>> Running run-$RUNSCRIPT.sh >>>>>>>>>>>>>>>>>>>>>>>>>>>>>\033[0m"
    CONTAINER_NAME=${RUNSCRIPT} \
      DETACH=true \
      bash $script_path/run-${RUNSCRIPT}.sh
  done
fi

echo "+++ :python: Client tests"
bash $script_path/run-repository.sh

==== python-elasticsearch-9.1.1/.coveragerc ====

[run]
omit =
    */python?.?/*
    */lib-python/?.?/*.py
    */lib_pypy/*
    */site-packages/*
    *.egg/*
    elasticsearch/_async/client/
    elasticsearch/_sync/client/
    test_elasticsearch/*

[report]
show_missing = True
exclude_lines=
    raise NotImplementedError*

==== python-elasticsearch-9.1.1/.dockerignore ====

docs
example
venv
.tox
.nox
.*_cache

==== python-elasticsearch-9.1.1/.github/ISSUE_TEMPLATE.md ====

**Describe the feature**:

**Elasticsearch version** (`bin/elasticsearch --version`):

**`elasticsearch-py` version (`elasticsearch.__versionstr__`)**:

Please make sure the major version matches the Elasticsearch server you are running.

**Description of the problem including expected versus actual behavior**:

**Steps to reproduce**:

==== python-elasticsearch-9.1.1/.github/make.sh ====

#!/usr/bin/env bash
# ------------------------------------------------------- #
#
# Skeleton for common build entry script for all elastic
# clients. Needs to be adapted to individual client usage.
#
# Must be called: ./.github/make.sh
#
# Version: 1.1.0
#
# Targets:
# ---------------------------
# assemble   : build client artefacts with version
# bump       : bump client internals to version
# codegen    : generate endpoints
# docsgen    : generate documentation
# examplegen : generate the doc examples
# clean      : clean workspace
#
# ------------------------------------------------------- #

# ------------------------------------------------------- #
# Bootstrap
# ------------------------------------------------------- #

script_path=$(dirname "$(realpath -s "$0")")
repo=$(realpath "$script_path/../")

# shellcheck disable=SC1090
CMD=$1
TASK=$1
TASK_ARGS=()
VERSION=$2
STACK_VERSION=$VERSION
set -euo pipefail

product="elastic/elasticsearch-py"
output_folder=".github/output"
codegen_folder=".github/output"
OUTPUT_DIR="$repo/${output_folder}"
REPO_BINDING="${OUTPUT_DIR}:/sln/${output_folder}"
WORKFLOW="${WORKFLOW-staging}"
mkdir -p "$OUTPUT_DIR"

echo -e "\033[34;1mINFO:\033[0m PRODUCT ${product}\033[0m"
echo -e "\033[34;1mINFO:\033[0m VERSION ${STACK_VERSION}\033[0m"
echo -e "\033[34;1mINFO:\033[0m OUTPUT_DIR ${OUTPUT_DIR}\033[0m"

# ------------------------------------------------------- #
# Parse Command
# ------------------------------------------------------- #

case $CMD in
  clean)
    echo -e "\033[36;1mTARGET: clean workspace $output_folder\033[0m"
    rm -rf "$output_folder"
    echo -e "\033[32;1mdone.\033[0m"
    exit 0
    ;;
  assemble)
    if [ -v $VERSION ]; then
      echo -e "\033[31;1mTARGET: assemble -> missing version parameter\033[0m"
      exit 1
    fi
    echo -e "\033[36;1mTARGET: assemble artefact $VERSION\033[0m"
    TASK=release
    TASK_ARGS=("$VERSION" "$output_folder")
    ;;
  codegen)
    VERSION=$(git rev-parse --abbrev-ref HEAD)
    echo -e "\033[36;1mTARGET: codegen API $VERSION\033[0m"
    TASK=codegen
    # VERSION is BRANCH here for now
    TASK_ARGS=("$VERSION" "$codegen_folder")
    ;;
  docsgen)
    if [ -v $VERSION ]; then
      echo -e "\033[31;1mTARGET: docsgen -> missing version parameter\033[0m"
      exit 1
    fi
    echo -e "\033[36;1mTARGET: generate docs for $VERSION\033[0m"
    TASK=codegen
    # VERSION is BRANCH here for now
    TASK_ARGS=("$VERSION" "$codegen_folder")
    ;;
  examplesgen)
    echo -e "\033[36;1mTARGET: generate examples\033[0m"
    TASK=codegen
    # VERSION is BRANCH here for now
    TASK_ARGS=("$VERSION" "$codegen_folder")
    ;;
  bump)
    if [ -v $VERSION ]; then
      echo -e "\033[31;1mTARGET: bump -> missing version parameter\033[0m"
      exit 1
    fi
    echo -e "\033[36;1mTARGET: bump to version $VERSION\033[0m"
    TASK=bump
    # VERSION is BRANCH here for now
    TASK_ARGS=("$VERSION")
    ;;
  *)
    echo -e "\nUsage:\n\t $CMD is not supported right now\n"
    exit 1
esac

# ------------------------------------------------------- #
# Build Container
# ------------------------------------------------------- #

echo -e "\033[34;1mINFO: building $product container\033[0m"

docker build \
  --build-arg BUILDER_UID="$(id -u)" \
  --file $repo/.buildkite/Dockerfile \
  --tag ${product} \
  .
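# Added illustrative note: a typical end-to-end invocation of this script is
# `./.github/make.sh assemble 9.1.1` (the version number here is hypothetical;
# `assemble`, `bump` and `docsgen` require one, as the parsing above enforces).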
# ------------------------------------------------------- #
# Run the Container
# ------------------------------------------------------- #

echo -e "\033[34;1mINFO: running $product container\033[0m"

if [[ "$CMD" == "assemble" ]]; then
  # Build dists into .github/output
  docker run \
    -u "$(id -u)" \
    --rm -v $repo/.github/output:/code/elasticsearch-py/dist \
    $product \
    /bin/bash -c "pip install build; python /code/elasticsearch-py/utils/build-dists.py $VERSION"

  # Verify that there are dists in .github/output
  if compgen -G ".github/output/*" > /dev/null; then
    # Tarball everything up in .github/output
    if [[ "$WORKFLOW" == 'snapshot' ]]; then
      cd $repo/.github/output && tar -czvf elasticsearch-py-$VERSION-SNAPSHOT.tar.gz * && cd -
    else
      cd $repo/.github/output && tar -czvf elasticsearch-py-$VERSION.tar.gz * && cd -
    fi

    echo -e "\033[32;1mTARGET: successfully assembled client v$VERSION\033[0m"
    exit 0
  else
    echo -e "\033[31;1mTARGET: assemble failed, empty workspace!\033[0m"
    exit 1
  fi
fi

if [[ "$CMD" == "bump" ]]; then
  docker run \
    --rm -v $repo:/code/elasticsearch-py \
    $product \
    /bin/bash -c "python /code/elasticsearch-py/utils/bump-version.py $VERSION"
  exit 0
fi

if [[ "$CMD" == "codegen" ]]; then
  docker run \
    --rm -v $repo:/code/elasticsearch-py \
    $product \
    /bin/bash -c "cd /code && python -m pip install nox && \
      git clone https://$CLIENTS_GITHUB_TOKEN@github.com/elastic/elastic-client-generator-python.git && \
      cd /code/elastic-client-generator-python && GIT_BRANCH=$VERSION python -m nox -s generate-es && \
      cd /code/elasticsearch-py && python -m nox -s format"
  exit 0
fi

if [[ "$CMD" == "docsgen" ]]; then
  echo "TODO"
fi

if [[ "$CMD" == "examplesgen" ]]; then
  echo "TODO"
fi

echo "Must be called with '.github/make.sh [command]'"
exit 1

==== python-elasticsearch-9.1.1/.github/workflows/backport.yml ====

name: Backport
on:
  pull_request_target:
    types:
      - closed
      - labeled

jobs:
  backport:
    name: Backport
    runs-on: ubuntu-latest
    # Only react to merged PRs for security reasons.
    # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target.
    if: >
      github.event.pull_request.merged
      && (
        github.event.action == 'closed'
        || (
          github.event.action == 'labeled'
          && contains(github.event.label.name, 'backport')
        )
      )
    steps:
      - uses: tibdex/backport@9565281eda0731b1d20c4025c43339fb0a23812e  # v2.0.4
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}

==== python-elasticsearch-9.1.1/.github/workflows/ci.yml ====

---
name: CI

on: [push, pull_request]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Set up Python 3.x
        uses: actions/setup-python@v5
        with:
          python-version: "3.x"
      - name: Install dependencies
        run: |
          python3 -m pip install nox
      - name: Lint the code
        run: nox -s lint

  package:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Set up Python 3.x
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Install dependencies
        run: |
          python3 -m pip install build
      - name: Build dists
        run: python utils/build-dists.py

  test-linux:
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
        nox-session: [""]
        runs-on: ["ubuntu-latest"]

    runs-on: ${{ matrix.runs-on }}
    name: test-${{ matrix.python-version }}
    continue-on-error: false
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4
      - name: Set Up Python - ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install Dependencies
        run: |
          python -m pip install nox
      - name: Run Tests
        shell: bash
        run: .buildkite/run-nox.sh
        env:
          PYTHON_VERSION: ${{ matrix.python-version }}
          NOX_SESSION: ${{ matrix.nox-session }}

==== python-elasticsearch-9.1.1/.github/workflows/docs-build.yml ====

name: docs-build

on:
  push:
    branches:
      - main
  pull_request_target: ~
  merge_group: ~

jobs:
  docs-preview:
    uses: elastic/docs-builder/.github/workflows/preview-build.yml@main
    with:
      path-pattern: docs/**
    permissions:
      deployments: write
      id-token: write
      contents: read
      pull-requests: write

==== python-elasticsearch-9.1.1/.github/workflows/docs-cleanup.yml ====

name: docs-cleanup

on:
  pull_request_target:
    types:
      - closed

jobs:
  docs-preview:
    uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main
    permissions:
      contents: none
      id-token: write
      deployments: write

==== python-elasticsearch-9.1.1/.gitignore ====

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
node_modules

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/sphinx/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# Pycharm project settings
.idea

# elasticsearch files
test_elasticsearch/cover
test_elasticsearch/local.py
.buildkite/output
junit/

# sample code for GitHub issues
issues/

==== python-elasticsearch-9.1.1/.readthedocs.yml ====

version: 2

build:
  os: ubuntu-22.04
  tools:
    # To work around https://github.com/aio-libs/aiohttp/issues/7675, we need
    # to set AIOHTTP_NO_EXTENSIONS to 1 but it has to be done in
    # https://readthedocs.org/dashboard/elasticsearch-py/environmentvariables/
    # because of https://github.com/readthedocs/readthedocs.org/issues/6311
    python: "3"

python:
  install:
    - path: .
      extra_requirements:
        - "docs"

sphinx:
  configuration: docs/sphinx/conf.py
  fail_on_warning: true

==== python-elasticsearch-9.1.1/CHANGELOG.md ====

See: https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html

==== python-elasticsearch-9.1.1/CODE_OF_CONDUCT.md ====

See: https://www.elastic.co/community/codeofconduct

==== python-elasticsearch-9.1.1/CONTRIBUTING.md ====

# Contributing to the Python Elasticsearch Client

If you have a bugfix or new feature that you would like to contribute to
elasticsearch-py, please find or open an issue about it first. Talk about what
you would like to do. It may be that somebody is already working on it, or that
there are particular issues that you should know about before implementing the
change.

We enjoy working with contributors to get their code accepted. There are many
approaches to fixing a problem and it is important to find the best approach
before writing too much code.

## Running Elasticsearch locally

We've provided a script to start an Elasticsearch cluster of a certain version,
found at `.buildkite/run-elasticsearch.sh`.
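Once that cluster is up, a quick way to check it from Python is a short
connectivity script. The sketch below is illustrative rather than part of the
test suite: the password and CA path come from `.buildkite/functions/imports.sh`
and `.buildkite/certs/`, and the test certificate is issued for the hostname
`instance`, so TLS hostname verification may require mapping that name to
127.0.0.1 (for example via `/etc/hosts`):

```python
# Illustrative sketch: connect to the locally started cluster using the CI
# credentials ("elastic" / "changeme") and the CA certificate shipped in
# .buildkite/certs/.
from elasticsearch import Elasticsearch

client = Elasticsearch(
    "https://instance:9200",  # hostname the test certificate is issued for
    basic_auth=("elastic", "changeme"),
    ca_certs=".buildkite/certs/ca.crt",
)
print(client.info())
```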
There are several environment variables that control integration tests:

- `PYTHON_VERSION`: Version of Python to use, defaults to `3.9`
- `PYTHON_CONNECTION_CLASS`: Connection class to use, defaults to `Urllib3HttpConnection`
- `STACK_VERSION`: Version of Elasticsearch to use. These should be the same as
  tags of `docker.elastic.co/elasticsearch/elasticsearch` such as
  `8.0.0-SNAPSHOT`, `7.x-SNAPSHOT`, etc. Defaults to the same `*-SNAPSHOT`
  version as the branch.

**NOTE: You don't need to run the live integration tests for all changes. If
you don't have Elasticsearch running locally the integration tests will be
skipped.**

## API Code Generation

All API methods for the `Elasticsearch` and `AsyncElasticsearch` client
instances (like `search()`) are automatically generated from the
[Elasticsearch specification](https://github.com/elastic/elasticsearch-specification)
and [rest-api-spec](https://github.com/elastic/elasticsearch/tree/master/rest-api-spec/src/main/resources/rest-api-spec/api).
Any changes to these methods should instead be submitted to the Elasticsearch
specification project and will be imported the next time the client's API is
generated. The generator itself is currently a private project.

## Contributing Code Changes

The process for contributing to any of the Elasticsearch repositories is similar.

1. Please make sure you have signed the
   [Contributor License Agreement](http://www.elastic.co/contributor-agreement/).
   We are not asking you to assign copyright to us, but to give us the right to
   distribute your code without restriction. We ask this of all contributors in
   order to assure our users of the origin and continuing existence of the code.
   You only need to sign the CLA once.

2. Run the linter and test suite to ensure your changes do not break existing code:

   ```
   # Install Nox for task management
   $ python -m pip install nox

   # Auto-format and lint your changes
   $ nox -rs format

   # Run the test suite
   $ nox -rs test
   ```

3. Rebase your changes. Update your local repository with the most recent code
   from the main elasticsearch-py repository, and rebase your branch on top of
   the latest `main` branch. We prefer your changes to be squashed into a single
   commit for easier backporting.

4. Submit a pull request. Push your local changes to your forked copy of the
   repository and submit a pull request. In the pull request, describe what your
   changes do and mention the number of the issue where discussion has taken
   place, e.g. “Closes #123”. Please consider adding or modifying tests related
   to your changes.

Then sit back and wait. There will probably be a discussion about the pull
request and, if any changes are needed, we would love to work with you to get
your pull request merged into elasticsearch-py.

==== python-elasticsearch-9.1.1/LICENSE ====

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

==== python-elasticsearch-9.1.1/NOTICE ====

Elasticsearch Python Client
Copyright 2022 Elasticsearch B.V.

==== python-elasticsearch-9.1.1/README.md ====

Elastic logo

# Elasticsearch Python Client

PyPI Version Python Versions Conda Version Downloads
Build Status on GitHub Buildkite Status on Buildkite Documentation Status

*The official Python client for Elasticsearch.*

## Features

* Translating basic Python data types to and from JSON
* Configurable automatic discovery of cluster nodes
* Persistent connections
* Load balancing (with pluggable selection strategy) across available nodes
* Failed connection penalization (time based - failed connections won't be retried until a timeout is reached)
* Support for TLS and HTTP authentication
* Thread safety across requests
* Pluggable architecture
* Helper functions for idiomatically using APIs together

## Installation

[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch)
or [sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page)
for a free trial of Elastic Cloud.

Refer to the [Installation section](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_installation)
of the getting started documentation.

## Connecting

Refer to the [Connecting section](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_connecting)
of the getting started documentation.

## Usage

* [Creating an index](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_creating_an_index)
* [Indexing a document](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_indexing_documents)
* [Getting documents](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_getting_documents)
* [Searching documents](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_searching_documents)
* [Updating documents](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_updating_documents)
* [Deleting documents](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_deleting_documents)
* [Deleting an index](https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html#_deleting_an_index)

## Compatibility

Language clients are _forward compatible:_ each client version works with
equivalent and later minor versions of Elasticsearch without breaking.
Compatibility does not imply full feature parity. New Elasticsearch features
are supported only in equivalent client versions. For example, an 8.12 client
fully supports Elasticsearch 8.12 features and works with 8.13 without
breaking; however, it does not support new Elasticsearch 8.13 features. An 8.13
client fully supports Elasticsearch 8.13 features.

| Elasticsearch version | elasticsearch-py branch |
| --- | --- |
| main | main |
| 9.x | 9.x |
| 9.x | 8.x |
| 8.x | 8.x |

Elasticsearch language clients are also _backward compatible_ across minor
versions — with default distributions and without guarantees.

> [!TIP]
> To upgrade to a new major version, first upgrade Elasticsearch, then upgrade
> the Python Elasticsearch client.

If you need to work with multiple client versions, note that older versions are
also released as `elasticsearch7` and `elasticsearch8`.

## Documentation

Documentation for the client is [available on elastic.co] and [Read the Docs].

[available on elastic.co]: https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/index.html
[Read the Docs]: https://elasticsearch-py.readthedocs.io

## License

This software is licensed under the [Apache License 2.0](./LICENSE).
See [NOTICE](./NOTICE).

==== python-elasticsearch-9.1.1/catalog-info.yaml ====

---
# yaml-language-server: $schema=https://json.schemastore.org/catalog-info.json
apiVersion: backstage.io/v1alpha1
kind: Component
metadata:
  name: elasticsearch-py
spec:
  type: library
  owner: group:devtools-team
  lifecycle: production
  dependsOn:
    - "resource:elasticsearch-py"
---
# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json
apiVersion: backstage.io/v1alpha1
kind: Resource
metadata:
  name: elasticsearch-py
  description: elasticsearch-py integration tests
spec:
  type: buildkite-pipeline
  owner: group:devtools-team
  system: buildkite
  implementation:
    apiVersion: buildkite.elastic.dev/v1
    kind: Pipeline
    metadata:
      name: elasticsearch-py integration tests
    spec:
      repository: elastic/elasticsearch-py
      pipeline_file: .buildkite/pipeline.yml
      env:
        ELASTIC_SLACK_NOTIFICATIONS_ENABLED: 'true'
        SLACK_NOTIFICATIONS_CHANNEL: '#devtools-notify-python'
      teams:
        devtools-team:
          access_level: MANAGE_BUILD_AND_READ
        everyone:
          access_level: READ_ONLY
      cancel_intermediate_builds: true
      cancel_intermediate_builds_branch_filter: '!main'
      schedules:
        main:
          branch: 'main'
          cronline: '0 10 * * *'
          message: 'Daily run for main branch'
        Daily 9.0:
          branch: '9.0'
          cronline: '0 10 * * *'
          message: 'Daily run for 9.0 branch'
        Daily 8.19:
          branch: '8.19'
          cronline: '0 10 * * *'
          message: 'Daily run for 8.19 branch'
        Daily 8.18:
          branch: '8.18'
          cronline: '0 10 * * *'
          message: 'Daily run for 8.18 branch'

==== python-elasticsearch-9.1.1/docs/docset.yml ====

project: 'Python client'
products:
  - id: elasticsearch-client
cross_links:
  - apm-agent-python
  - docs-content
  - elasticsearch
toc:
  - toc: reference
  - toc: release-notes
subs:
  es: "Elasticsearch"

==== python-elasticsearch-9.1.1/docs/examples/00272f75a6afea91f8554ef7cda0c1f2.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// rest-api/security/clear-cache.asciidoc:75

[source, python]
----
resp = client.security.clear_cached_realms(
    realms="default_file,ldap1",
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/004743b9c9f61588926ccf734696b713.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// indices/forcemerge.asciidoc:216

[source, python]
----
resp = client.indices.forcemerge(
    index=".ds-my-data-stream-2099.03.07-000001",
    max_num_segments="1",
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/004a17b42ab5155bb61da797a006fa9f.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// query-dsl/pinned-query.asciidoc:13

[source, python]
----
resp = client.search(
    query={
        "pinned": {
            "ids": ["1", "4", "100"],
            "organic": {
                "match": {
                    "description": "iphone"
                }
            }
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/006e0e16c9f1da58c0bfe57377f7fc38.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/stemmer-tokenfilter.asciidoc:85

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_analyzer": {
                    "tokenizer": "whitespace",
                    "filter": ["stemmer"]
                }
            }
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/007179b5e241da650562a5f0a5007823.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// watcher/example-watches/example-watch-clusterstatus.asciidoc:193

[source, python]
----
resp = client.watcher.put_watch(
    id="cluster_health_watch",
    trigger={
        "schedule": {
            "interval": "10s"
        }
    },
    input={
        "http": {
            "request": {
                "host": "localhost",
                "port": 9200,
                "path": "/_cluster/health"
            }
        }
    },
    condition={
        "compare": {
            "ctx.payload.status": {
                "eq": "red"
            }
        }
    },
    actions={
        "send_email": {
            "email": {
                "to": "username@example.org",
                "subject": "Cluster Status Warning",
                "body": "Cluster status is RED"
            }
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/008ed823c89e703c447ac89c6b689833.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// migration/apis/feature-migration.asciidoc:158

[source, python]
----
resp = client.migration.post_feature_upgrade()
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/0091fc75271b1fbbd4269622a4881e8b.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// scripting/common-script-uses.asciidoc:107

[source, python]
----
resp = client.search(
    index="my-index",
    query={
        "match": {
            "http.clientip": "40.135.0.0"
        }
    },
    fields=["http.clientip"],
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/00ad41bde67beac991534ae0e04b1296.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// migration/apis/data-stream-reindex.asciidoc:273

[source, python]
----
resp = client.indices.get_data_stream(
    name="my-data-stream",
    filter_path="data_streams.indices.index_name",
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/00b3b6d76a368ae71277ea24af318693.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// indices/shard-stores.asciidoc:140

[source, python]
----
resp = client.indices.shard_stores()
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/00c05aa931fc985985e3e21c93cf43ff.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-template.asciidoc:443

[source, python]
----
resp = client.render_search_template(
    source="{ \"query\": {{#toJson}}my_query{{/toJson}} }",
    params={
        "my_query": {
            "match_all": {}
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/00d65f7b9daa1c6b18eedd8ace206bae.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/asciifolding-tokenfilter.asciidoc:21

[source, python]
----
resp = client.indices.analyze(
    tokenizer="standard",
    filter=["asciifolding"],
    text="açaí à la carte",
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/00e0c964c79fcc1876ab957da2ffce82.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:1204

[source, python]
----
resp = client.indices.create(
    index="italian_example",
    settings={
        "analysis": {
            "filter": {
                "italian_elision": {
                    "type": "elision",
                    "articles": [
                        "c", "l", "all", "dall", "dell", "nell", "sull",
                        "coll", "pell", "gl", "agl", "dagl", "degl", "negl",
                        "sugl", "un", "m", "t", "s", "v", "d"
                    ],
                    "articles_case": True
                },
                "italian_stop": {
                    "type": "stop",
                    "stopwords": "_italian_"
                },
                "italian_keywords": {
                    "type": "keyword_marker",
                    "keywords": ["esempio"]
                },
                "italian_stemmer": {
                    "type": "stemmer",
                    "language": "light_italian"
                }
            },
            "analyzer": {
                "rebuilt_italian": {
                    "tokenizer": "standard",
                    "filter": [
                        "italian_elision",
                        "lowercase",
                        "italian_stop",
                        "italian_keywords",
                        "italian_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/00fea15cbca83be9d5f1a024ff2ec708.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// inference/service-elasticsearch.asciidoc:204

[source, python]
----
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="my-e5-model",
    inference_config={
        "service": "elasticsearch",
        "service_settings": {
            "num_allocations": 1,
            "num_threads": 1,
            "model_id": ".multilingual-e5-small"
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/010d5e901a2690fa7b2396edbe6cd463.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// ingest/common-log-format-example.asciidoc:161

[source, python]
----
resp = client.indices.put_index_template(
    name="my-data-stream-template",
    index_patterns=["my-data-stream*"],
    data_stream={},
    priority=500,
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/015e6e6132b6d6d44bddb06bc3b316ed.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/retrievers-examples.asciidoc:1051

[source, python]
----
resp = client.search(
    index="retrievers_example",
    retriever={
        "rrf": {
            "retrievers": [
                {
                    "standard": {
                        "query": {
                            "range": {
                                "year": {
                                    "gt": 2023
                                }
                            }
                        }
                    }
                },
                {
                    "standard": {
                        "query": {
                            "term": {
                                "topic": "elastic"
                            }
                        }
                    }
                }
            ],
            "rank_window_size": 10,
            "rank_constant": 1
        }
    },
    source=False,
    aggs={
        "topics": {
            "terms": {
                "field": "topic"
            }
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/0163af36c8472ac0c5160c8b716f5b26.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/filter-aggregation.asciidoc:58

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    filter_path="aggregations",
    query={
        "term": {
            "type": "t-shirt"
        }
    },
    aggs={
        "avg_price": {
            "avg": {
                "field": "price"
            }
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/0165d22da5f2fc7678392b31d8eb5566.asciidoc ====

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/retrievers-examples.asciidoc:1363

[source, python]
----
resp = client.inference.put(
    task_type="rerank",
    inference_id="my-rerank-model",
    inference_config={
        "service": "cohere",
        "service_settings": {
            "model_id": "rerank-english-v3.0",
            "api_key": "{{COHERE_API_KEY}}"
        }
    },
)
print(resp)
----

==== python-elasticsearch-9.1.1/docs/examples/016f3147dae9ff2c3e831257ae470361.asciidoc ====
This file is autogenerated, DO NOT EDIT // alias.asciidoc:54 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "logs-*", "alias": "logs" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/019e329ed5a930aef825266822e7377a.asciidoc000066400000000000000000000012631506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/asciifolding-tokenfilter.asciidoc:118 [source, python] ---- resp = client.indices.create( index="asciifold_example", settings={ "analysis": { "analyzer": { "standard_asciifolding": { "tokenizer": "standard", "filter": [ "my_ascii_folding" ] } }, "filter": { "my_ascii_folding": { "type": "asciifolding", "preserve_original": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/01ae196538fac197eedbbf458a4ef31b.asciidoc000066400000000000000000000013151506101734100267170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/keyword.asciidoc:260 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "kwd": { "type": "keyword", "ignore_above": 3 } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "kwd": [ "foo", "foo", "bang", "bar", "baz" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/01b23f09d2b7f140faf649eadbbf3ac3.asciidoc000066400000000000000000000015211506101734100267540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/index-templates.asciidoc:86 [source, python] ---- resp = client.cluster.put_component_template( name="component_template1", template={ "mappings": { "properties": { "@timestamp": { "type": "date" } } } }, ) print(resp) resp1 = client.cluster.put_component_template( name="runtime_component_template", template={ "mappings": { "runtime": { "day_of_week": { "type": "keyword", "script": { "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" } } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/01bc0f2ed30eb3dd23511d01ce0ac6e1.asciidoc000066400000000000000000000003241506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/start-transform.asciidoc:85 [source, python] ---- resp = client.transform.start_transform( transform_id="ecommerce_transform", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/01cd0ea360282a2c591a366679d7187d.asciidoc000066400000000000000000000003711506101734100262540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/task-queue-backlog.asciidoc:83 [source, python] ---- resp = client.tasks.list( human=True, detailed=True, actions="indices:data/write/bulk", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/01da9e0620e48270617fc248e6415cac.asciidoc000066400000000000000000000004421506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations.asciidoc:36 [source, python] ---- resp = client.search( index="my-index-000001", aggs={ "my-agg-name": { "terms": { "field": "my-field" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/01dc7bdc223bd651574ed2d3954a5b1c.asciidoc000066400000000000000000000002741506101734100265420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/execute-watch.asciidoc:153 [source, python] ---- 
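# Note (hand-added, not autogenerated): this call triggers the watch on
# demand, ignoring its trigger schedule, so its condition and actions can
# be exercised and inspected immediately.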
resp = client.watcher.execute_watch( id="my_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/01f50acf7998b24969f451e922d145eb.asciidoc000066400000000000000000000021111506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:184 [source, python] ---- resp = client.indices.create( index="basque_example", settings={ "analysis": { "filter": { "basque_stop": { "type": "stop", "stopwords": "_basque_" }, "basque_keywords": { "type": "keyword_marker", "keywords": [ "Adibidez" ] }, "basque_stemmer": { "type": "stemmer", "language": "basque" } }, "analyzer": { "rebuilt_basque": { "tokenizer": "standard", "filter": [ "lowercase", "basque_stop", "basque_keywords", "basque_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/020c95db88ef356093f03be84893ddf9.asciidoc000066400000000000000000000002721506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/get-follow-stats.asciidoc:41 [source, python] ---- resp = client.ccr.follow_stats( index="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/020de6b6cb960a76297452725a38889f.asciidoc000066400000000000000000000006121506101734100262160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/has-child-query.asciidoc:53 [source, python] ---- resp = client.search( query={ "has_child": { "type": "child", "query": { "match_all": {} }, "max_children": 10, "min_children": 2, "score_mode": "min" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0246f73cc2ed3dfec577119e8cd15404.asciidoc000066400000000000000000000005461506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:183 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "name": { "properties": { "last": { "type": "text" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/025155da86802ebf4c3aeee5aab692f9.asciidoc000066400000000000000000000011731506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/tophits-aggregation.asciidoc:254 [source, python] ---- resp = client.indices.create( index="sales", mappings={ "properties": { "tags": { "type": "keyword" }, "comments": { "type": "nested", "properties": { "username": { "type": "keyword" }, "comment": { "type": "text" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02520ac7816b2c4cf8fb413fd16122f2.asciidoc000066400000000000000000000003321506101734100263710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/flush-job.asciidoc:81 [source, python] ---- resp = client.ml.flush_job( job_id="low_request_rate", calc_interim=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0264e994a7e68561e2ca6be0f0d90ee9.asciidoc000066400000000000000000000011431506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:571 [source, python] ---- resp = client.search( aggs={ "JapaneseCars": { "terms": { "field": "make", "include": [ "mazda", "honda" ] } }, "ActiveCarManufacturers": { "terms": { "field": "make", "exclude": [ "rover", "jensen" ] } } }, ) print(resp) ---- 
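
// Editor's note (hand-added, not autogenerated): the example above filters
// the `make` terms with exact-value lists. The same `include` and `exclude`
// parameters also accept a regular expression string; the sketch below is a
// hypothetical variant of the request above, not taken from the upstream docs.

[source, python]
----
# Assumes `client` is an existing Elasticsearch client instance, as in the
# autogenerated examples in this directory.
resp = client.search(
    aggs={
        "JapaneseCars": {
            "terms": {
                "field": "make",
                # Regex form of the include filter: matches makes starting
                # with "maz" or "hon" instead of listing them exactly.
                "include": "maz.*|hon.*"
            }
        }
    },
)
print(resp)
----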
python-elasticsearch-9.1.1/docs/examples/0280247e0cf2e561c548f22c9fb31163.asciidoc000066400000000000000000000003111506101734100262410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:205 [source, python] ---- resp = client.security.invalidate_token( username="myuser", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02853293a5b7cd9cc7a886eb413bbeb6.asciidoc000066400000000000000000000011601506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/mapping-charfilter.asciidoc:26 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", char_filter=[ { "type": "mapping", "mappings": [ "٠ => 0", "١ => 1", "٢ => 2", "٣ => 3", "٤ => 4", "٥ => 5", "٦ => 6", "٧ => 7", "٨ => 8", "٩ => 9" ] } ], text="My license plate is ٢٥٠١٥", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/029de2f5383a42e1ac4ca1565bd2a130.asciidoc000066400000000000000000000006741506101734100264530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/index-prefixes.asciidoc:41 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "full_name": { "type": "text", "index_prefixes": { "min_chars": 1, "max_chars": 10 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02b00f21e9d23d82276ace0dd154d779.asciidoc000066400000000000000000000004371506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/routing-field.asciidoc:62 [source, python] ---- resp = client.search( index="my-index-000001", routing="user1,user2", query={ "match": { "title": "document" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02b6aa3e5652839f03de3a655854b897.asciidoc000066400000000000000000000002701506101734100262700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:466 [source, python] ---- resp = client.search( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02c48d461536709c3fc8a0e8147c3787.asciidoc000066400000000000000000000007301506101734100262110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/pipeline.asciidoc:54 [source, python] ---- resp = client.ingest.put_pipeline( id="pipelineB", description="outer pipeline", processors=[ { "pipeline": { "name": "pipelineA" } }, { "set": { "field": "outer_pipeline_set", "value": "outer" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02f65c6bab8f40bf3ce18160623d1870.asciidoc000066400000000000000000000002751506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template-v1.asciidoc:41 [source, python] ---- resp = client.indices.get_template( name="template_1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/02fad6b80bb29c2a7e6840db2fc67b18.asciidoc000066400000000000000000000012471506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/wildcard.asciidoc:78 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_wildcard": { "type": "wildcard" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "my_wildcard": "This string can be quite lengthy" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "wildcard": { 
"my_wildcard": { "value": "*quite*lengthy" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/0308cbd85281f95fc458042afe3f587d.asciidoc000066400000000000000000000002741506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:85 [source, python] ---- resp = client.get( index="my-index-000001", id="0", source="*.id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/032eac56b798bea29390e102538f4a26.asciidoc000066400000000000000000000003011506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/refresh.asciidoc:109 [source, python] ---- resp = client.indices.refresh( index="my-index-000001,my-index-000002", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/033838729cfb5d1a28d04f69ee78d924.asciidoc000066400000000000000000000017171506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:299 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "Polygon", "orientation": "LEFT", "coordinates": [ [ [ -177, 10 ], [ 176, 15 ], [ 172, 0 ], [ 176, -15 ], [ -177, -10 ], [ -177, 10 ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0350410d11579f4e876c798ce1eaef5b.asciidoc000066400000000000000000000016151506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:565 [source, python] ---- resp = client.index( index="my-index-000001", id="5", refresh=True, document={ "query": { "bool": { "should": [ { "match": { "message": { "query": "Japanese art", "_name": "query1" } } }, { "match": { "message": { "query": "Holand culture", "_name": "query2" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0350ff5ebb8207c004eb771088339cb4.asciidoc000066400000000000000000000020001506101734100263160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rrf.asciidoc:127 [source, python] ---- resp = client.search( index="example-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "term": { "text": "blue shoes sale" } } } }, { "standard": { "query": { "sparse_vector": { "field": "ml.tokens", "inference_id": "my_elser_model", "query": "What blue shoes are on sale?" 
} } } } ], "rank_window_size": 50, "rank_constant": 20 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/03582fc93683e573062bcfda45e01d69.asciidoc000066400000000000000000000014431506101734100263460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/custom-analyzer.asciidoc:59 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_custom_analyzer": { "type": "custom", "tokenizer": "standard", "char_filter": [ "html_strip" ], "filter": [ "lowercase", "asciifolding" ] } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_custom_analyzer", text="Is this déjà vu?", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/035a7a919eb6513b4769a3727b7d6447.asciidoc000066400000000000000000000003171506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/testing.asciidoc:9 [source, python] ---- resp = client.indices.analyze( analyzer="whitespace", text="The quick brown fox.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/03891265df2111a38e0b6b24c1b967e1.asciidoc000066400000000000000000000002701506101734100262410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-service-accounts.asciidoc:320 [source, python] ---- resp = client.security.get_service_accounts() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/03b1d76fa0b773d5b7d74ecb7e1e1a80.asciidoc000066400000000000000000000005331506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:152 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="my_snapshot_2099.05.06", indices="my-index,logs-my_app-default", rename_pattern="(.+)", rename_replacement="restored-$1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/03c4b815bf1e6a8c5cfcc6ddf94bc093.asciidoc000066400000000000000000000003531506101734100267770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/apis/sql-search-api.asciidoc:17 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT * FROM library ORDER BY page_count DESC LIMIT 5", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/04412d11783dac25b5fd2ec5407078a3.asciidoc000066400000000000000000000004571506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-api-key-id-api.asciidoc:93 [source, python] ---- resp = client.connector.update_api_key_id( connector_id="my-connector", api_key_id="my-api-key-id", api_key_secret_id="my-connector-secret-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/044b2f99e7438e408685b258db17f863.asciidoc000066400000000000000000000003661506101734100262340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:141 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n process where process.name == \"regsvr32.exe\"\n ", size=50, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/046b2249bbc49e77848c114cee940f17.asciidoc000066400000000000000000000034161506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/text-expansion-query.asciidoc:164 [source, python] ---- resp = client.search( index="my-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { 
"multi_match": { "query": "How is the weather in Jamaica?", "fields": [ "title", "description" ] } } } }, { "standard": { "query": { "text_expansion": { "ml.inference.title_expanded.predicted_value": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?" } } } } }, { "standard": { "query": { "text_expansion": { "ml.inference.description_expanded.predicted_value": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?" } } } } } ], "window_size": 10, "rank_constant": 20 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0470d7101637568b9d3d1239f06325a7.asciidoc000066400000000000000000000015401506101734100260340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/update-desired-nodes.asciidoc:21 [source, python] ---- resp = client.perform_request( "PUT", "/_internal/desired_nodes/<history_id>/<version>", headers={"Content-Type": "application/json"}, body={ "nodes": [ { "settings": { "node.name": "instance-000187", "node.external_id": "instance-000187", "node.roles": [ "data_hot", "master" ], "node.attr.data": "hot", "node.attr.logical_availability_zone": "zone-0" }, "processors": 8, "memory": "58gb", "storage": "2tb" } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/047266b0d20fdb62ebc72d51952c8f6d.asciidoc000066400000000000000000000006231506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:344 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "Will Smith", "type": "cross_fields", "fields": [ "first_name", "last_name" ], "operator": "and" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/048652b6abfe195da8ea8cef10ee01b1.asciidoc000066400000000000000000000003241506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/reset-transform.asciidoc:67 [source, python] ---- resp = client.transform.reset_transform( transform_id="ecommerce_transform", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/04d586a536061ec1045d0bb2dc3d1a5f.asciidoc000066400000000000000000000010731506101734100264470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/set.asciidoc:39 [source, python] ---- resp = client.ingest.put_pipeline( id="set_os", description="sets the value of host.os.name from the field os", processors=[ { "set": { "field": "host.os.name", "value": "{{{os}}}" } } ], ) print(resp) resp1 = client.ingest.simulate( id="set_os", docs=[ { "_source": { "os": "Ubuntu" } } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/04d6ce0c903bd468afbecd3aa1c4a78a.asciidoc000066400000000000000000000011551506101734100270400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/put-pipeline.asciidoc:126 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline-id", description="My optional pipeline description", processors=[ { "set": { "description": "My optional processor description", "field": "my-keyword-field", "value": "foo" } } ], meta={ "reason": "set my-keyword-field to foo", "serialization": { "class": "MyPipeline", "id": 10 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/04de2e3a9c00c2056b07bf9cf9e63a99.asciidoc000066400000000000000000000010231506101734100265530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-google-vertex-ai.asciidoc:133 [source, python] ---- resp = 
client.inference.put( task_type="text_embedding", inference_id="google_vertex_ai_embeddings", inference_config={ "service": "googlevertexai", "service_settings": { "service_account_json": "", "model_id": "", "location": "", "project_id": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/04f5dd677c777bcb15d7d5fa63275fc8.asciidoc000066400000000000000000000003021506101734100265740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/health.asciidoc:48 [source, python] ---- resp = client.cluster.health( wait_for_status="yellow", timeout="50s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0502284d4685c478eb68761f979f4303.asciidoc000066400000000000000000000015131506101734100260660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:321 [source, python] ---- resp = client.ml.evaluate_data_frame( index="house_price_predictions", query={ "bool": { "filter": [ { "term": { "ml.is_training": False } } ] } }, evaluation={ "regression": { "actual_field": "price", "predicted_field": "ml.price_prediction", "metrics": { "r_squared": {}, "mse": {}, "msle": { "offset": 10 }, "huber": { "delta": 1.5 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/050b3947025fee403232b8e6e9112dab.asciidoc000066400000000000000000000003621506101734100263200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:256 [source, python] ---- resp = client.sql.query( format="yaml", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05148cc541f447486d9daf15ab77292b.asciidoc000066400000000000000000000025561506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/ilm.asciidoc:31 [source, python] ---- resp = client.ilm.put_lifecycle( name="logs", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_size": "50gb" } } }, "warm": { "min_age": "30d", "actions": { "shrink": { "number_of_shards": 1 }, "forcemerge": { "max_num_segments": 1 } } }, "cold": { "min_age": "60d", "actions": { "searchable_snapshot": { "snapshot_repository": "found-snapshots" } } }, "frozen": { "min_age": "90d", "actions": { "searchable_snapshot": { "snapshot_repository": "found-snapshots" } } }, "delete": { "min_age": "735d", "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0518c673094fb18ecb491a3b78af4695.asciidoc000066400000000000000000000007431506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-allocate.asciidoc:89 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "allocate": { "include": { "box_type": "hot,warm" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05284c8ea91769c09c8db47db8a6629a.asciidoc000066400000000000000000000002411506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/repositories.asciidoc:57 [source, python] ---- resp = client.cat.repositories( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/053497b6960f80fd7b005b7c6d54358f.asciidoc000066400000000000000000000005321506101734100262740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-delete.asciidoc:40 [source, python] ---- resp = client.ilm.put_lifecycle( 
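# Note (hand-added): this policy has only a delete phase with no min_age,
# so ILM deletes a managed index almost as soon as the policy applies.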
name="my_policy", policy={ "phases": { "delete": { "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05500e77aef581d92f6c605f7a48f7df.asciidoc000066400000000000000000000015231506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:199 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "polygon", "coordinates": [ [ [ 1000, -1001 ], [ 1001, -1001 ], [ 1001, -1000 ], [ 1000, -1000 ], [ 1000, -1001 ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/059e04aaf093379401f665c33ac796dc.asciidoc000066400000000000000000000007041506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc:163 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "keyword_marker", "keywords": [ "jumping" ] }, "stemmer" ], text="fox running and jumping", explain=True, attributes="keyword", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05a09078fe1016e900e445ad4039cf97.asciidoc000066400000000000000000000027311506101734100262600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc:8 [source, python] ---- resp = client.indices.create( index="clientips", mappings={ "properties": { "client_ip": { "type": "keyword" }, "env": { "type": "keyword" } } }, ) print(resp) resp1 = client.bulk( index="clientips", operations=[ { "index": {} }, { "client_ip": "172.21.0.5", "env": "Development" }, { "index": {} }, { "client_ip": "172.21.2.113", "env": "QA" }, { "index": {} }, { "client_ip": "172.21.2.162", "env": "QA" }, { "index": {} }, { "client_ip": "172.21.3.15", "env": "Production" }, { "index": {} }, { "client_ip": "172.21.3.16", "env": "Production" } ], ) print(resp1) resp2 = client.enrich.put_policy( name="clientip_policy", match={ "indices": "clientips", "match_field": "client_ip", "enrich_fields": [ "env" ] }, ) print(resp2) resp3 = client.enrich.execute_policy( name="clientip_policy", wait_for_completion=False, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/05ba0fdd0215e313ecea8a2f8f5a43b4.asciidoc000066400000000000000000000003111506101734100266630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-a-data-stream.asciidoc:360 [source, python] ---- resp = client.indices.get_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05bee3adf46b9d6a2fef96c51bf958da.asciidoc000066400000000000000000000007611506101734100270770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/document-level-security.asciidoc:46 [source, python] ---- resp = client.security.put_role( name="click_role", indices=[ { "names": [ "events-*" ], "privileges": [ "read" ], "query": { "match": { "category": "click" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05e637284bc3bedd46e0b7c26ad983c4.asciidoc000066400000000000000000000010411506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:249 [source, python] ---- resp = client.ingest.put_pipeline( id="alibabacloud_ai_search_embeddings_pipeline", processors=[ { "inference": { "model_id": "alibabacloud_ai_search_embeddings", "input_output": { "input_field": 
"content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05f4a4b284f68f7fb13603d7cd854083.asciidoc000066400000000000000000000005211506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:332 [source, python] ---- resp = client.indices.put_settings( index="logs-my_app-default", settings={ "index": { "lifecycle": { "name": "new-lifecycle-policy" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/05f6049c677a156bdf9b83e71a3b87ed.asciidoc000066400000000000000000000002311506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/ssl.asciidoc:90 [source, python] ---- resp = client.ssl.certificates() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0601b5cb5328c9ebff30f4be1b210f93.asciidoc000066400000000000000000000003551506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-status-api.asciidoc:333 [source, python] ---- resp = client.snapshot.status( repository="my_repository", snapshot="snapshot_2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/060a56477e39f272fc5a9cfe47443cf1.asciidoc000066400000000000000000000013171506101734100264340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/simplepattern-tokenizer.asciidoc:39 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "simple_pattern", "pattern": "[0123456789]{3}" } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="fd-786-335-514-x", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/0620a10ff15a2bb3eb489afc24ff0131.asciidoc000066400000000000000000000003101506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:342 [source, python] ---- resp = client.search( index="my-index-000001", size="surprise_me", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/06454a8e85e2d3479c90390bb955eb39.asciidoc000066400000000000000000000003771506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:589 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="snapshot*,-snapshot_3", sort="name", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/066e0bdcdfa3b8afa5d1e5777f73fccb.asciidoc000066400000000000000000000005201506101734100271340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:333 [source, python] ---- resp = client.indices.rollover( alias="my-alias", conditions={ "max_age": "7d", "max_docs": 1000, "max_primary_shard_size": "50gb", "max_primary_shard_docs": "2000" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/069030e5f43d8f8ce3e3eca40205027e.asciidoc000066400000000000000000000023531506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/properties.asciidoc:17 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "manager": { "properties": { "age": { "type": "integer" }, "name": { "type": "text" } } }, "employees": { "type": 
"nested", "properties": { "age": { "type": "integer" }, "name": { "type": "text" } } } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "region": "US", "manager": { "name": "Alice White", "age": 30 }, "employees": [ { "name": "John Smith", "age": 34 }, { "name": "Peter Brown", "age": 26 } ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/06a761823a694850a6efe5d5bf61478c.asciidoc000066400000000000000000000006431506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/match-enrich-policy-type-ex.asciidoc:44 [source, python] ---- resp = client.enrich.put_policy( name="users-policy", match={ "indices": "users", "match_field": "email", "enrich_fields": [ "first_name", "last_name", "city", "zip", "state" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/06b5d3d56c4d4e3b61ae42ea26401c40.asciidoc000066400000000000000000000007721506101734100264550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/multi-search.asciidoc:16 [source, python] ---- resp = client.msearch( index="my-index-000001", searches=[ {}, { "query": { "match": { "message": "this is a test" } } }, { "index": "my-index-000002" }, { "query": { "match_all": {} } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/06c0db0f42223761e32fa418066b275f.asciidoc000066400000000000000000000006761506101734100262470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/snapshot/corrupt-repository.asciidoc:97 [source, python] ---- resp = client.snapshot.create_repository( name="my-repo", repository={ "type": "s3", "settings": { "bucket": "repo-bucket", "client": "elastic-internal-71bcd3", "base_path": "myrepo", "readonly": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/06d65e3505dcb306977185e8545cf4a8.asciidoc000066400000000000000000000004371506101734100263010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/increase-cluster-shard-limit.asciidoc:172 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.total_shards_per_node": 400 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0709a38613d2de90d418ce12b36af30e.asciidoc000066400000000000000000000002731506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:113 [source, python] ---- resp = client.cluster.reroute() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/070cf72783cfe534a04f2f64e4016052.asciidoc000066400000000000000000000006501506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/subobjects.asciidoc:92 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "subobjects": False }, ) print(resp) resp1 = client.index( index="my-index-000001", id="metric_1", document={ "time": "100ms", "time.min": "10ms", "time.max": "900ms" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/0718a0b4f4905a8c90c1ff93de557e56.asciidoc000066400000000000000000000005511506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/extendedstats-aggregation.asciidoc:70 [source, python] ---- resp = client.search( index="exams", size=0, aggs={ "grades_stats": { "extended_stats": { "field": "grade", "sigma": 3 } } }, ) print(resp) 
---- python-elasticsearch-9.1.1/docs/examples/0721c8adec544d5ecea3fcc410e45feb.asciidoc000066400000000000000000000004241506101734100270430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/activate-user-profile.asciidoc:104 [source, python] ---- resp = client.security.activate_user_profile( grant_type="password", username="jacknich", password="l0ng-r4nd0m-p@ssw0rd", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0722b302b2b3275a988d858044f99d5d.asciidoc000066400000000000000000000003121506101734100262030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:45 [source, python] ---- resp = client.indices.get_mapping( index="kibana_sample_data_ecommerce", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0737ebaea33631f001fb3f4226948492.asciidoc000066400000000000000000000006531506101734100262530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geoip.asciidoc:237 [source, python] ---- resp = client.indices.create( index="my_ip_locations", mappings={ "properties": { "geoip": { "properties": { "location": { "type": "geo_point" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/073864d3f52f8f79aafdaa85a88ac46a.asciidoc000066400000000000000000000002761506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-cache.asciidoc:82 [source, python] ---- resp = client.security.clear_cached_realms( realms="*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/074e4602d1ca54412380a40867d078bc.asciidoc000066400000000000000000000011421506101734100261560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/slowlog.asciidoc:180 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index.indexing.slowlog.threshold.index.warn": "10s", "index.indexing.slowlog.threshold.index.info": "5s", "index.indexing.slowlog.threshold.index.debug": "2s", "index.indexing.slowlog.threshold.index.trace": "500ms", "index.indexing.slowlog.source": "1000", "index.indexing.slowlog.reformat": True, "index.indexing.slowlog.include.user": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0755471d7dce4785d2e7ed0c10182ea3.asciidoc000066400000000000000000000003461506101734100264200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/get-transform-stats.asciidoc:336 [source, python] ---- resp = client.transform.get_transform_stats( transform_id="ecommerce-customer-transform", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/07a5fdeb7805cec1d28ba288b28f5ff5.asciidoc000066400000000000000000000003321506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/stop-job.asciidoc:81 [source, python] ---- resp = client.rollup.stop_job( id="sensor", wait_for_completion=True, timeout="10s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/07ba3eaa931f2cf110052e3544db51f8.asciidoc000066400000000000000000000006331506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:884 [source, python] ---- resp = client.reindex( max_docs=10, source={ "index": "my-index-000001", "query": { "function_score": { "random_score": {}, "min_score": 0.9 } } }, dest={ "index": "my-new-index-000001" }, ) print(resp) ---- 
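
// Editor's note (hand-added, not autogenerated): on a large source index a
// reindex like the one above can run for minutes or hours. A minimal sketch
// of the usual pattern, starting the reindex asynchronously and checking the
// resulting task, follows; it assumes the same indices as the example above.

[source, python]
----
# Assumes `client` is an existing Elasticsearch client instance.
resp = client.reindex(
    max_docs=10,
    source={
        "index": "my-index-000001",
        "query": {
            "function_score": {
                "random_score": {},
                "min_score": 0.9
            }
        }
    },
    dest={
        "index": "my-new-index-000001"
    },
    # Return immediately with a task id instead of blocking.
    wait_for_completion=False,
)

# Check the task; repeat this call until "completed" is true.
task = client.tasks.get(task_id=resp["task"])
print(task["completed"])
----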
python-elasticsearch-9.1.1/docs/examples/07c07f6d497b1a3012aa4320f830e09e.asciidoc000066400000000000000000000005631506101734100263200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/post-forget-follower.asciidoc:139 [source, python] ---- resp = client.ccr.forget_follower( index="leader_index", follower_cluster="follower_cluster", follower_index="follower_index", follower_index_uuid="vYpnaWPRQB6mNspmoCeYyA", leader_remote_cluster="leader_cluster", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/07dadb9b0a774bd8e7f3527cf8a44afc.asciidoc000066400000000000000000000004661506101734100270110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/semantic-query.asciidoc:17 [source, python] ---- resp = client.search( index="my-index-000001", query={ "semantic": { "field": "inference_field", "query": "Best surfing places" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/07de76cb0e7f11c7533788faf8c093c3.asciidoc000066400000000000000000000005771506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/flattened.asciidoc:205 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "title": { "type": "text" }, "labels": { "type": "flattened" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/07ec38b97601286ec106986a84e1e5f7.asciidoc000066400000000000000000000007361506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-set-query.asciidoc:49 [source, python] ---- resp = client.indices.create( index="job-candidates", mappings={ "properties": { "name": { "type": "keyword" }, "programming_languages": { "type": "keyword" }, "required_matches": { "type": "long" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/080c34d8151d02b760571e3a2899fa97.asciidoc000066400000000000000000000016241506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc:91 [source, python] ---- resp = client.indices.create( index="test", settings={ "analysis": { "filter": { "email": { "type": "pattern_capture", "preserve_original": True, "patterns": [ "([^@]+)", "(\\p{L}+)", "(\\d+)", "@(.+)" ] } }, "analyzer": { "email": { "tokenizer": "uax_url_email", "filter": [ "email", "lowercase", "unique" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/082e78c7a2061a7c4a52b494e5ede0e8.asciidoc000066400000000000000000000015431506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/rank-vectors.asciidoc:64 [source, python] ---- resp = client.indices.create( index="my-rank-vectors-bit", mappings={ "properties": { "my_vector": { "type": "rank_vectors", "element_type": "bit" } } }, ) print(resp) resp1 = client.bulk( index="my-rank-vectors-bit", refresh=True, operations=[ { "index": { "_id": "1" } }, { "my_vector": [ 127, -127, 0, 1, 42 ] }, { "index": { "_id": "2" } }, { "my_vector": "8100012a7f" } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/083b92e8ea264e49bf9fd40fc6a3094b.asciidoc000066400000000000000000000011211506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elasticsearch.asciidoc:264 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="my-e5-model", inference_config={ "service": 
"elasticsearch", "service_settings": { "adaptive_allocations": { "enabled": True, "min_number_of_allocations": 3, "max_number_of_allocations": 10 }, "num_threads": 1, "model_id": ".multilingual-e5-small" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/083e514297c09e91211f0d168aef1b0b.asciidoc000066400000000000000000000007211506101734100263230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/bi-directional-disaster-recovery.asciidoc:256 [source, python] ---- resp = client.update_by_query( index="logs-generic-default", query={ "match": { "event.sequence": "97" } }, script={ "source": "ctx._source.event.original = params.new_event", "lang": "painless", "params": { "new_event": "FOOBAR" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/086ec4c5d86bbf80fb80162e94037689.asciidoc000066400000000000000000000027351506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/weighted-tokens-query.asciidoc:21 [source, python] ---- resp = client.search( query={ "weighted_tokens": { "query_expansion_field": { "tokens": { "2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012 }, "pruning_config": { "tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4, "only_score_pruned_tokens": False } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0881397074d261ccc2db514daf116c31.asciidoc000066400000000000000000000003411506101734100263150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:128 [source, python] ---- resp = client.security.get_api_key( id="VuaCfGcBCdbkQm-e5aOx", with_limited_by=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/08a76b3f5a8394d8f9084113334a260a.asciidoc000066400000000000000000000005601506101734100261760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/boxplot-aggregation.asciidoc:149 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_boxplot": { "boxplot": { "field": "load_time", "compression": 200 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/08c9af9dd519c011deedd406f3061836.asciidoc000066400000000000000000000026041506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/preview-datafeed.asciidoc:157 [source, python] ---- resp = client.ml.preview_datafeed( datafeed_config={ "indices": [ "kibana_sample_data_ecommerce" ], "query": { "bool": { "filter": [ { "term": { "_index": "kibana_sample_data_ecommerce" } } ] } }, "scroll_size": 1000 }, job_config={ "description": "Find customers spending an unusually high amount in an hour", "analysis_config": { "bucket_span": "1h", "detectors": [ { "detector_description": "High total sales", "function": "high_sum", "field_name": "taxful_total_price", "over_field_name": "customer_full_name.keyword" } ], "influencers": [ "customer_full_name.keyword", "category.keyword" ] }, "analysis_limits": { "model_memory_limit": "10mb" }, "data_description": { "time_field": "order_date", "time_format": "epoch_ms" } }, ) 
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/08e08feb514b24006e13f258d617d873.asciidoc000066400000000000000000000002511506101734100262540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:234 [source, python] ---- resp = client.get_script( id="calculate-score", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/08e79ca9fdcdfebb2c6a79e6837e649d.asciidoc000066400000000000000000000005561506101734100270430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cardinality-aggregation.asciidoc:229 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "tag_cardinality": { "cardinality": { "field": "tag", "missing": "N/A" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/08f20902821a4f7a73ce7b959c5bdbdc.asciidoc000066400000000000000000000006721506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/regexp-query.asciidoc:23 [source, python] ---- resp = client.search( query={ "regexp": { "user.id": { "value": "k.*y", "flags": "ALL", "case_insensitive": True, "max_determinized_states": 10000, "rewrite": "constant_score_blended" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/091200b658023db31dffc2f08a85a9cc.asciidoc000066400000000000000000000005131506101734100264560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/total-shards-per-node.asciidoc:174 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "routing.allocation.total_shards_per_node": -1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0957bbd535f58c97b12ffba90813d64c.asciidoc000066400000000000000000000003641506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:367 [source, python] ---- resp = client.indices.create( index="analyze_sample", settings={ "index.analyze.max_token_count": 20000 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/095d60b2cfc5004c97efc49f27287262.asciidoc000066400000000000000000000005721506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:198 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "date_histogram": { "field": "date", "fixed_interval": "30d" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/095e3f21941a9cc75f398389a075152d.asciidoc000066400000000000000000000011751506101734100262220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:1150 [source, python] ---- resp = client.ml.infer_trained_model( model_id="cross-encoder__ms-marco-tinybert-l-2-v2", docs=[ { "text_field": "Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers." }, { "text_field": "New York City is famous for the Metropolitan Museum of Art." } ], inference_config={ "text_similarity": { "text": "How many people live in Berlin?" 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09769561f082b50558fb7d8707719963.asciidoc000066400000000000000000000003141506101734100260140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-stats.asciidoc:2588 [source, python] ---- resp = client.nodes.stats( metric="ingest", filter_path="nodes.*.ingest", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/099006ab11b52ea99693401dceee8bad.asciidoc000066400000000000000000000004321506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:220 [source, python] ---- resp = client.put_script( id="calculate-score", script={ "lang": "painless", "source": "Math.log(_score * 2) + params['my_modifier']" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09944369863fd8666d5301d717317276.asciidoc000066400000000000000000000007241506101734100257340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/condition-tokenfilter.asciidoc:22 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "condition", "filter": [ "lowercase" ], "script": { "source": "token.getTerm().length() < 5" } } ], text="THE QUICK BROWN FOX", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09a44b619a99f6bf3f01bd5e258fd22d.asciidoc000066400000000000000000000003271506101734100265660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/keyword-tokenizer.asciidoc:15 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", text="New York", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09a478fe32a7b7d814083ffa5297bcdf.asciidoc000066400000000000000000000004051506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/fuzzy-query.asciidoc:29 [source, python] ---- resp = client.search( query={ "fuzzy": { "user.id": { "value": "ki" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09bdf9a7e22733d668476724042a406c.asciidoc000066400000000000000000000006731506101734100262120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-tutorial.asciidoc:131 [source, python] ---- resp = client.indices.put_index_template( name="timeseries_template", index_patterns=[ "timeseries" ], data_stream={}, template={ "settings": { "number_of_shards": 1, "number_of_replicas": 1, "index.lifecycle.name": "timeseries_policy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09cb1b18bf4033b4afafb25bd3dab12c.asciidoc000066400000000000000000000007341506101734100270210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rule-query.asciidoc:71 [source, python] ---- resp = client.search( query={ "rule": { "match_criteria": { "user_query": "pugs" }, "ruleset_ids": [ "my-ruleset" ], "organic": { "match": { "description": "puggles" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09ce0ec993c494ac01f01ef9815fcc4b.asciidoc000066400000000000000000000006671506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/grok-syntax.asciidoc:150 [source, python] ---- resp = client.indices.put_mapping( index="my-index", runtime={ "http.clientip": { "type": "ip", "script": "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n " } }, ) print(resp) ---- 
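
// Editor's note (hand-added, not autogenerated): once the `http.clientip`
// runtime field above is mapped, it can be queried like a regular `ip` field.
// A minimal sketch follows; the ip value is illustrative only and assumes
// documents whose `message` field matches the COMMONAPACHELOG pattern.

[source, python]
----
# Assumes `client` is an existing Elasticsearch client instance.
resp = client.search(
    index="my-index",
    query={
        "term": {
            # Computed at search time from the grok script in the mapping.
            "http.clientip": "40.135.0.0"
        }
    },
)
print(resp)
----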
python-elasticsearch-9.1.1/docs/examples/09d617863a103c82fb4101e6165ea7fe.asciidoc000066400000000000000000000002771506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-all-query.asciidoc:11 [source, python] ---- resp = client.search( query={ "match_all": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/09e6e06ba562f4b9bac59455e9151a80.asciidoc000066400000000000000000000007001506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:523 [source, python] ---- resp = client.ml.evaluate_data_frame( index="animal_classification", evaluation={ "classification": { "actual_field": "animal_class", "metrics": { "auc_roc": { "class_name": "dog" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a3003fa5af850e415634b50b1029859.asciidoc000066400000000000000000000004721506101734100261570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/bi-directional-disaster-recovery.asciidoc:237 [source, python] ---- resp = client.search( index="logs-generic-default*", filter_path="hits.hits._index", query={ "match": { "event.sequence": "97" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a3186bf20b5359393406fc0cb433313.asciidoc000066400000000000000000000004051506101734100261520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:433 [source, python] ---- resp = client.sql.query( format="json", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, columnar=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a46ac2968a574ce145f197f10d30152.asciidoc000066400000000000000000000017201506101734100262460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/getting-started.asciidoc:9 [source, python] ---- resp = client.bulk( index="library", refresh=True, operations=[ { "index": { "_id": "Leviathan Wakes" } }, { "name": "Leviathan Wakes", "author": "James S.A. 
Corey", "release_date": "2011-06-02", "page_count": 561 }, { "index": { "_id": "Hyperion" } }, { "name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482 }, { "index": { "_id": "Dune" } }, { "name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a46cc8fe93e372909660a63dc52ae3b.asciidoc000066400000000000000000000004311506101734100264760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:315 [source, python] ---- resp = client.indices.create( index="", aliases={ "my-alias": { "is_write_index": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a650401134f07e40216f0d0d1a66a32.asciidoc000066400000000000000000000002361506101734100261310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/allocation.asciidoc:126 [source, python] ---- resp = client.cat.allocation( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a6d56a66a2652ac6de68f8bd544a175.asciidoc000066400000000000000000000011171506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:115 [source, python] ---- resp = client.search( index="index1", query={ "query_string": { "query": "running with scissors", "fields": [ "comment", "comment.english" ] } }, highlight={ "order": "score", "fields": { "comment": { "matched_fields": [ "comment.english" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a701bdc7b6786026f40c0be8ebfc753.asciidoc000066400000000000000000000011311506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/ecommerce-tutorial.asciidoc:439 [source, python] ---- resp = client.transform.preview_transform( source={ "index": "kibana_sample_data_ecommerce", "query": { "bool": { "filter": { "term": { "currency": "EUR" } } } } }, latest={ "unique_key": [ "geoip.country_iso_code", "geoip.region_name" ], "sort": "order_date" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a758d9dec74d9e942cf41a06499234f.asciidoc000066400000000000000000000004151506101734100264420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:287 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "counter": 1, "tags": [ "red" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a84c5b7c0793be745b13eaf13e94422.asciidoc000066400000000000000000000005131506101734100264100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/total-shards-per-node.asciidoc:78 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "routing.allocation.total_shards_per_node": "2" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0a9173f3b22716c78653976dc4799eae.asciidoc000066400000000000000000000010401506101734100263000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:131 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "product": { "terms": { "field": "product" } } } ] } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/0ac295efdabd59e7b1f1a4577535d942.asciidoc
// This file is autogenerated, DO NOT EDIT
// eql/eql.asciidoc:161

[source, python]
----
resp = client.eql.search(
    index="my-data-stream",
    query="\n sequence\n [ process where process.name == \"regsvr32.exe\" ]\n [ file where stringContains(file.name, \"scrobj.dll\") ]\n ",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ac9e7dd7e4acba51888256326ed5ffe.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-api.asciidoc:287

[source, python]
----
resp = client.search(
    index="my-index-000001",
    track_total_hits=True,
    query={"match": {"user.id": "elkbee"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ad86b582aff1235f37ccb2cc90adad5.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/use-a-data-stream.asciidoc:151

[source, python]
----
resp = client.indices.open(
    index=".ds-my-data-stream-2099.03.07-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ad8edd10542ec2c4d5d8700d7e2ba97.asciidoc
// This file is autogenerated, DO NOT EDIT
// inference/service-amazon-bedrock.asciidoc:162

[source, python]
----
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="amazon_bedrock_embeddings",
    inference_config={
        "service": "amazonbedrock",
        "service_settings": {
            "access_key": "",
            "secret_key": "",
            "region": "us-east-1",
            "provider": "amazontitan",
            "model": "amazon.titan-embed-text-v2:0"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0adbce828234ca221e3d03b184296407.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/common-script-uses.asciidoc:84

[source, python]
----
resp = client.indices.put_mapping(
    index="my-index",
    runtime={
        "http.clientip": {
            "type": "ip",
            "script": "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip); \n "
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ade87c8cb0e3c188d2e3dce279d5cc2.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/apis/update-connector-filtering-api.asciidoc:122

[source, python]
----
resp = client.connector.update_filtering(
    connector_id="my-g-drive-connector",
    rules=[
        {
            "field": "file_extension",
            "id": "exclude-txt-files",
            "order": 0,
            "policy": "exclude",
            "rule": "equals",
            "value": "txt"
        },
        {
            "field": "_",
            "id": "DEFAULT",
            "order": 1,
            "policy": "include",
            "rule": "regex",
            "value": ".*"
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0aff04881be21eea45375ec4f4f50e66.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/invalidate-api-keys.asciidoc:89

[source, python]
----
resp = client.security.create_api_key(
    name="my-api-key",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0b1c5486f96bfa5db8db854c0178dbe5.asciidoc
// This file is autogenerated, DO NOT EDIT
// modules/cluster/remote-clusters-connect.asciidoc:44

[source, python]
----
resp = client.cluster.put_settings(
    persistent={
        "cluster": {
            "remote": {
                "cluster_one": {
                    "seeds": ["127.0.0.1:{remote-interface-default-port}"]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0b47b0bef81b9b5eecfb3775695bd6ad.asciidoc
// This file is autogenerated, DO NOT EDIT
// monitoring/production.asciidoc:96

[source, python]
----
resp = client.security.put_user(
    username="remote_monitor",
    password="changeme",
    roles=["remote_monitoring_agent"],
    full_name="Internal Agent For Remote Monitoring",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0b4e50f1b5a0537cbb1a41276bb51c54.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-api.asciidoc:167

[source, python]
----
resp = client.search(
    index="my-index-000001",
    runtime_mappings={
        "day_of_week": {
            "type": "keyword",
            "script": {
                "source": "emit(doc['@timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))"
            }
        }
    },
    aggs={"day_of_week": {"terms": {"field": "day_of_week"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0b615ff4ef5a8847ee8109b2fd11619a.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/using.asciidoc:243

[source, python]
----
resp = client.search(
    index="my-index-000001",
    query={
        "script_score": {
            "query": {"match": {"message": "some message"}},
            "script": {
                "id": "calculate-score",
                "params": {"my_modifier": 2}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0b913fb9e010d877c0be015519cfddc6.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/index-mgmt.asciidoc:177

[source, python]
----
resp = client.index(
    index="my-index-000001",
    document={
        "@timestamp": "2019-05-18T15:57:27.541Z",
        "ip": "225.44.217.191",
        "extension": "jpg",
        "response": "200",
        "geo": {"coordinates": {"lat": 38.53146222, "lon": -121.7864906}},
        "url": "https://media-for-the-masses.theacademyofperformingartsandscience.org/uploads/charles-fullerton.jpg"
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000002",
    document={
        "@timestamp": "2019-05-20T03:44:20.844Z",
        "ip": "198.247.165.49",
        "extension": "php",
        "response": "200",
        "geo": {"coordinates": {"lat": 37.13189556, "lon": -76.4929875}},
        "memory": 241720,
        "url": "https://theacademyofperformingartsandscience.org/people/type:astronauts/name:laurel-b-clark/profile"
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/0b987b4101e016653a32d7b092d47e4c.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/object.asciidoc:46

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "region": {"type": "keyword"},
            "manager": {
                "properties": {
                    "age": {"type": "integer"},
                    "name": {
                        "properties": {
                            "first": {"type": "text"},
                            "last": {"type": "text"}
                        }
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0bc6155e0c88062a4d8490da49db3aa8.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/retrievers-examples.asciidoc:812

[source, python]
----
resp = client.search(
    index="retrievers_example_nested",
    retriever={
        "rrf": {
            "retrievers": [
                {
                    "standard": {
                        "query": {
                            "nested": {
                                "path": "nested_field",
                                "inner_hits": {
                                    "name": "nested_vector",
                                    "_source": False,
                                    "fields": ["nested_field.paragraph_id"]
                                },
                                "query": {
                                    "knn": {
                                        "field": "nested_field.nested_vector",
                                        "query_vector": [1, 0, 0.5],
                                        "k": 10
                                    }
                                }
                            }
                        }
                    }
                },
                {"standard": {"query": {"term": {"topic": "ai"}}}}
            ],
            "rank_window_size": 10,
            "rank_constant": 1
        }
    },
    source=["topic"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0bcd380315ef4691b8c79df6ca53a85f.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/sort-search-results.asciidoc:397

[source, python]
----
resp = client.search(
    sort=[{"price": {"unmapped_type": "long"}}],
    query={"term": {"product": "chocolate"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0bee07a581c5776e068f6f4efad5a399.asciidoc
// This file is autogenerated, DO NOT EDIT
// esql/esql-across-clusters.asciidoc:194

[source, python]
----
resp = client.esql.async_query(
    format="json",
    query="\n FROM my-index-000001,cluster_one:my-index-000001,cluster_two:my-index*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ",
    include_ccs_metadata=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0c05c66cfe3a2169b1ec1aba77e26db2.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/rank-feature-query.asciidoc:274

[source, python]
----
resp = client.search(
    index="test",
    query={"rank_feature": {"field": "pagerank", "saturation": {}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0c2ca704a39dda8b3a7c5806ec6c6cf8.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:1377

[source, python]
----
resp = client.indices.put_mapping(
    index="my-index-000001",
    runtime={
        "http.client_ip": {
            "type": "ip",
            "script": "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip); \n "
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0c2d9ac7e3f28d4d802e21cbbbcfeb34.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat/recovery.asciidoc:118

[source, python]
----
resp = client.cat.recovery(
    v=True,
    h="i,s,t,ty,st,shost,thost,f,fp,b,bp",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0c464965126cc09e6812716a145991d4.asciidoc
// This file is autogenerated, DO NOT EDIT
// cluster/nodes-info.asciidoc:306

[source, python]
----
resp = client.nodes.info(
    node_id="ingest",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0c52af573c9401a2a687e86a4beb182b.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/processors/attachment.asciidoc:214

[source, python]
----
resp = client.ingest.put_pipeline(
    id="cbor-attachment",
    description="Extract attachment information",
    processors=[
        {
            "attachment": {
"field": "data", "remove_binary": True } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0c688eecf4ebdffdbe1deae0983c3ed8.asciidoc000066400000000000000000000014041506101734100273120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/cumulative-cardinality-aggregation.asciidoc:46 [source, python] ---- resp = client.search( index="user_hits", size=0, aggs={ "users_per_day": { "date_histogram": { "field": "timestamp", "calendar_interval": "day" }, "aggs": { "distinct_users": { "cardinality": { "field": "user_id" } }, "total_new_users": { "cumulative_cardinality": { "buckets_path": "distinct_users" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0c6f9c9da75293fae69659ac1d6329de.asciidoc000066400000000000000000000003341506101734100266060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:181 [source, python] ---- resp = client.security.invalidate_token( refresh_token="vLBPvmAB6KvwvJZr27cS", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0c6fc67c2dd1c1771cd866ce471d74e1.asciidoc000066400000000000000000000012201506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:212 [source, python] ---- resp = client.security.put_role_mapping( name="mapping4", roles=[ "superuser" ], enabled=True, rules={ "any": [ { "field": { "username": "esadmin" } }, { "field": { "groups": [ "cn=admins,dc=example,dc=com", "cn=other,dc=example,dc=com" ] } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0c7c40cd17985c3dd32aeaadbafc4fce.asciidoc000066400000000000000000000006031506101734100272000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:926 [source, python] ---- resp = client.render_search_template( source={ "query": { "match": { "message": "{{^name_exists}}Hello World{{/name_exists}}" } } }, params={ "name_exists": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0c892d328b73d38396aaef6d9cbcd36b.asciidoc000066400000000000000000000003061506101734100266500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete.asciidoc:88 [source, python] ---- resp = client.delete( index="my-index-000001", id="1", routing="shard-1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0c8be7aec84ea86b243904f5d4162f5a.asciidoc000066400000000000000000000005621506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:292 [source, python] ---- resp = client.search( index="cooking_blog", query={ "match": { "title": { "query": "fluffy pancakes breakfast", "minimum_should_match": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/0ca6aae1ab2f0be6127beea8a245374e.asciidoc000066400000000000000000000006551506101734100267540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:1004 [source, python] ---- resp = client.async_search.submit( index="my-index-000001,cluster*:my-index-000001,-cluster_three:*", query={ "match": { "user.id": "kimchy" } }, source=[ "user.id", "message", "http.response.status_code" ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/0cee58617e75f493c5049d77be1c49f3.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/fuzzy-query.asciidoc:46

[source, python]
----
resp = client.search(
    query={
        "fuzzy": {
            "user.id": {
                "value": "ki",
                "fuzziness": "AUTO",
                "max_expansions": 50,
                "prefix_length": 0,
                "transpositions": True,
                "rewrite": "constant_score_blended"
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0cf29da4b9f0503bd1a79bdc883aadbc.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/avg-aggregation.asciidoc:45

[source, python]
----
resp = client.search(
    index="exams",
    size="0",
    runtime_mappings={
        "grade.corrected": {
            "type": "double",
            "script": {
                "source": "emit(Math.min(100, doc['grade'].value * params.correction))",
                "params": {"correction": 1.2}
            }
        }
    },
    aggs={"avg_corrected_grade": {"avg": {"field": "grade.corrected"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d0f7ece06f21e624d21b09804732f61.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/avg-aggregation.asciidoc:92

[source, python]
----
resp = client.search(
    index="exams",
    size="0",
    aggs={"grade_avg": {"avg": {"field": "grade", "missing": 10}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d30077cd34e93377a3a86f2ebd69415.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/apis/create-connector-api.asciidoc:118

[source, python]
----
resp = client.connector.put(
    connector_id="my-connector",
    index_name="search-google-drive",
    name="My Connector",
    description="My Connector to sync data to Elastic index from Google Drive",
    service_type="google_drive",
    language="en",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d49474511b236bc89e768c8ee91adf1.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/simple-query-string-query.asciidoc:24

[source, python]
----
resp = client.search(
    query={
        "simple_query_string": {
            "query": "\"fried eggs\" +(eggplant | potato) -frittata",
            "fields": ["title^5", "body"],
            "default_operator": "and"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d54ddad2bf6f76aa5c35f53ba77748a.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/porterstem-tokenfilter.asciidoc:28

[source, python]
----
resp = client.indices.analyze(
    tokenizer="standard",
    filter=["porter_stem"],
    text="the foxes jumping quickly",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d59af9dc556dc526b9394051efa800a.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/ignore-missing-component-templates.asciidoc:91

[source, python]
----
resp = client.indices.rollover(
    alias="logs-foo-bar",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d689ac6e78be5d438f9b5d441be2b44.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/retrievers-examples.asciidoc:1191
[source, python]
----
resp = client.search(
    index="retrievers_example",
    retriever={
        "rrf": {
            "retrievers": [
                {"standard": {"query": {"term": {"topic": "elastic"}}}},
                {
                    "rrf": {
                        "retrievers": [
                            {
                                "standard": {
                                    "query": {
                                        "query_string": {
                                            "query": "(information retrieval) OR (artificial intelligence)",
                                            "default_field": "text"
                                        }
                                    }
                                }
                            },
                            {
                                "knn": {
                                    "field": "vector",
                                    "query_vector": [0.23, 0.67, 0.89],
                                    "k": 3,
                                    "num_candidates": 5
                                }
                            }
                        ],
                        "rank_window_size": 10,
                        "rank_constant": 1
                    }
                }
            ],
            "rank_window_size": 10,
            "rank_constant": 1
        }
    },
    source=False,
    size=1,
    explain=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d8063b484a18f8672fb5ed8712c5c97.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-index-template.asciidoc:305

[source, python]
----
resp = client.indices.put_index_template(
    name="template_1",
    index_patterns=["foo", "bar"],
    template={"settings": {"number_of_shards": 3}},
    meta={
        "description": "set number of shards to three",
        "serialization": {"class": "MyIndexTemplate", "id": 17}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0d94d76b7f00d0459d1f8c962c144dcd.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/create-role-mappings.asciidoc:314

[source, python]
----
resp = client.security.put_role_mapping(
    name="mapping8",
    roles=["superuser"],
    enabled=True,
    rules={
        "all": [
            {
                "any": [
                    {"field": {"dn": "*,ou=admin,dc=example,dc=com"}},
                    {"field": {"username": ["es-admin", "es-system"]}}
                ]
            },
            {"field": {"groups": "cn=people,dc=example,dc=com"}},
            {"except": {"field": {"metadata.terminated_date": None}}}
        ]
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0da477cb8a7883539ce3ae7ac1e9c5cb.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/histogram-aggregation.asciidoc:89

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    aggs={
        "prices": {
            "histogram": {"field": "price", "interval": 50, "min_doc_count": 1}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0da747e9d98bae157d3520ff1b489ad4.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/repository-s3.asciidoc:45

[source, python]
----
resp = client.snapshot.create_repository(
    name="my_s3_repository",
    repository={
        "type": "s3",
        "settings": {"bucket": "my-bucket", "client": "my-alternate-client"}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0db06c3cba57cf442ac7fab89966e1e1.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/parent-join.asciidoc:76

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    refresh=True,
    document={
        "my_id": "1",
        "text": "This is a question",
        "my_join_field": "question"
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="2",
    refresh=True,
    document={
        "my_id": "2",
        "text": "This is another question",
        "my_join_field": "question"
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/0dd30ffe2f900dde86cc9bb601d5e68e.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat/nodes.asciidoc:387

[source, python]
----
resp = client.cat.nodes(
    v=True,
    h="id,ip,port,v,m",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ddf705317d9c5095b4a1419a2e3bace.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/get-app-privileges.asciidoc:101

[source, python]
----
resp = client.security.get_privileges()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0dfa9733c94bc43c6f14c7b6984c98fb.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat/component-templates.asciidoc:113

[source, python]
----
resp = client.cat.component_templates(
    name="my-template-*",
    v=True,
    s="name",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0dfde6a9d953822fd4b3aa0121ddd8fb.asciidoc
// This file is autogenerated, DO NOT EDIT
// search-application/apis/search-application-render-query.asciidoc:119

[source, python]
----
resp = client.search_application.render_query(
    name="my-app",
    params={
        "query_string": "my first query",
        "text_fields": [
            {"name": "title", "boost": 5},
            {"name": "description", "boost": 1}
        ]
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0e0d8f652d7d29371b5ea7c7544385eb.asciidoc
// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-search.asciidoc:538

[source, python]
----
resp = client.search(
    index="amazon-bedrock-embeddings",
    knn={
        "field": "content_embedding",
        "query_vector_builder": {
            "text_embedding": {
                "model_id": "amazon_bedrock_embeddings",
                "model_text": "Calculate fuel cost"
            }
        },
        "k": 10,
        "num_candidates": 100
    },
    source=["id", "content"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0e118857b815b62118a30c042f079db1.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/multi-match-query.asciidoc:262

[source, python]
----
resp = client.search(
    query={
        "multi_match": {
            "query": "quick brown f",
            "type": "phrase_prefix",
            "fields": ["subject", "message"]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0e31b8ad176b31028becf9500989bcbd.asciidoc
// This file is autogenerated, DO NOT EDIT
// inference/service-watsonx-ai.asciidoc:102

[source, python]
----
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="watsonx-embeddings",
    inference_config={
        "service": "watsonxai",
        "service_settings": {
            "api_key": "",
            "url": "",
            "model_id": "ibm/slate-30m-english-rtrvr",
            "project_id": "",
            "api_version": "2024-03-14"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0e3b4a48a3450cd99c95ec46d4701b58.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/filter-aggregation.asciidoc:167

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    filter_path="aggregations",
    aggs={
        "hats": {
            "filter": {"term": {"type": "hat"}},
            "aggs": {"avg_price": {"avg": {"field": "price"}}}
        },
        "t_shirts": {
            "filter": {"term": {"type": "t-shirt"}},
            "aggs": {"avg_price": {"avg": {"field": "price"}}}
        }
    },
)
print(resp)
----
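Because the request above sets `filter_path="aggregations"`, the response contains only the aggregation tree. A small sketch (not autogenerated) of pulling the two averages out of it, assuming the standard response shape for a `filter` aggregation with an `avg` sub-aggregation:

[source, python]
----
# Sketch: read the filtered averages from the response above.
aggs = resp["aggregations"]
print(aggs["hats"]["avg_price"]["value"])
print(aggs["t_shirts"]["avg_price"]["value"])
----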
python-elasticsearch-9.1.1/docs/examples/0e5d25c7bb738c42d471020d678e2966.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/trained-models/apis/start-trained-model-deployment.asciidoc:206

[source, python]
----
resp = client.ml.start_trained_model_deployment(
    model_id="my_model",
    deployment_id="my_model_for_ingest",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0e71a18d1aac61720cdc6b3f91fe643f.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/simple-query-string-query.asciidoc:153

[source, python]
----
resp = client.search(
    query={
        "simple_query_string": {
            "fields": ["content"],
            "query": "foo bar -baz"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0e84bb54b8a9a5387f252eeffeb1098e.asciidoc
// This file is autogenerated, DO NOT EDIT
// watcher/getting-started.asciidoc:84

[source, python]
----
resp = client.watcher.put_watch(
    id="log_error_watch",
    trigger={"schedule": {"interval": "10s"}},
    input={
        "search": {
            "request": {
                "indices": ["logs"],
                "body": {"query": {"match": {"message": "error"}}}
            }
        }
    },
    condition={"compare": {"ctx.payload.hits.total": {"gt": 0}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ea146b178561bc8b9002bed8a35641f.asciidoc
// This file is autogenerated, DO NOT EDIT
// autoscaling/apis/get-autoscaling-policy.asciidoc:75

[source, python]
----
resp = client.autoscaling.get_autoscaling_policy(
    name="my_autoscaling_policy",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ea2167ce7c87d311b20c4f8c698a8d0.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/point-in-time-api.asciidoc:196

[source, python]
----
resp = client.search(
    slice={"id": 0, "max": 2},
    query={"match": {"message": "foo"}},
    pit={
        "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=="
    },
)
print(resp)

resp1 = client.search(
    slice={"id": 1, "max": 2},
    pit={
        "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA=="
    },
    query={"match": {"message": "foo"}},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/0eae571e9e1c40a40cb4b1c9530a8987.asciidoc
// This file is autogenerated, DO NOT EDIT
// ilm/apis/migrate-to-data-tiers.asciidoc:160

[source, python]
----
resp = client.ilm.migrate_to_data_tiers(
    legacy_template_to_delete="global-template",
    node_attribute="custom_attribute_name",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0eb2c1284a9829224913a860190580d8.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/fingerprint-tokenfilter.asciidoc:76

[source, python]
----
resp = client.indices.create(
    index="fingerprint_example",
    settings={
        "analysis": {
            "analyzer": {
                "whitespace_fingerprint": {
                    "tokenizer": "whitespace",
                    "filter": ["fingerprint"]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0ec2178fb0103862b47cc20bc5885972.asciidoc
// This file is autogenerated, DO NOT EDIT
// tab-widgets/register-fs-repo.asciidoc:127

[source, python]
----
resp = client.snapshot.create_repository(
    name="my_fs_backup",
    repository={
        "type": "fs",
        "settings": {"location": "my_fs_backup_location", "readonly": True}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0eccea755bd4f6dd47579a9022690546.asciidoc
// This file is autogenerated, DO NOT EDIT
// modules/cluster/remote-clusters-migration.asciidoc:133

[source, python]
----
resp = client.cluster.put_settings(
    persistent={
        "cluster": {
            "remote": {
                "my_remote": {
                    "mode": "proxy",
                    "proxy_address": "my.remote.cluster.com:9443"
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0f028f71f04c1d569fab402869565a84.asciidoc
// This file is autogenerated, DO NOT EDIT
// migration/migrate_9_0.asciidoc:476

[source, python]
----
resp = client.indices.put_settings(
    index=".reindexed-v9-ml-anomalies-custom-example",
    settings={"index": {"number_of_replicas": ""}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0f2e5e006b663a88ee99b130ab1b4844.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/sort-search-results.asciidoc:572

[source, python]
----
resp = client.search(
    sort=[
        {
            "_geo_distance": {
                "pin.location": [[-70, 40], [-71, 42]],
                "order": "asc",
                "unit": "km"
            }
        }
    ],
    query={"term": {"user": "kimchy"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0f3a78296825d507dda6771f7ceb9d61.asciidoc
// This file is autogenerated, DO NOT EDIT
// modules/cluster/allocation_filtering.asciidoc:22

[source, python]
----
resp = client.cluster.put_settings(
    persistent={"cluster.routing.allocation.exclude._ip": "10.0.0.1"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0f4583c56cfe5bd59eeb35bfba02957c.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/rank-eval.asciidoc:318

[source, python]
----
resp = client.rank_eval(
    index="my-index-000001",
    requests=[
        {
            "id": "JFK query",
            "request": {"query": {"match_all": {}}},
            "ratings": []
        }
    ],
    metric={"recall": {"k": 20, "relevant_rating_threshold": 1}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0f547926ebf092e19fc5fb433e9ac8c1.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/porterstem-tokenfilter.asciidoc:97

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_analyzer": {
                    "tokenizer": "whitespace",
                    "filter": ["lowercase", "porter_stem"]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0f7aa40ad26d59a9268630b980a3d594.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/simulate-template.asciidoc:61

[source, python]
----
resp = client.indices.simulate_template(
    name="template_1",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fa220ee3fb267020382f74aa70eb1e9.asciidoc
// This file is autogenerated, DO NOT EDIT
// cluster/state.asciidoc:157

[source, python]
----
resp = client.cluster.state(
    metric="_all",
    index="foo,bar",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fb472645116d58ddef89ca976d15a01.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:471

[source, python]
----
resp = client.bulk(
    index="my-index-000001",
    refresh=True,
    operations=[
        {"index": {}},
        {"@timestamp": 1516729294000, "model_number": "QVKC92Q", "measures": {"voltage": 5.2}},
        {"index": {}},
        {"@timestamp": 1516642894000, "model_number": "QVKC92Q", "measures": {"voltage": 5.8}},
        {"index": {}},
        {"@timestamp": 1516556494000, "model_number": "QVKC92Q", "measures": {"voltage": 5.1}},
        {"index": {}},
        {"@timestamp": 1516470094000, "model_number": "QVKC92Q", "measures": {"voltage": 5.6}},
        {"index": {}},
        {"@timestamp": 1516383694000, "model_number": "HG537PU", "measures": {"voltage": 4.2}},
        {"index": {}},
        {"@timestamp": 1516297294000, "model_number": "HG537PU", "measures": {"voltage": 4}}
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fb7705ddbf1fc2b65d2de2e00fe5769.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/scripted-metric-aggregation.asciidoc:63

[source, python]
----
resp = client.search(
    index="ledger",
    size="0",
    aggs={
        "profit": {
            "scripted_metric": {
                "init_script": {"id": "my_init_script"},
                "map_script": {"id": "my_map_script"},
                "combine_script": {"id": "my_combine_script"},
                "params": {"field": "amount"},
                "reduce_script": {"id": "my_reduce_script"}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fbca60a487f5f22a4d51d73b2434cc4.asciidoc
// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-mapping.asciidoc:37

[source, python]
----
resp = client.indices.create(
    index="elser-embeddings",
    mappings={
        "properties": {
            "content_embedding": {"type": "sparse_vector"},
            "content": {"type": "text"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fc4b589df5388da784c6d981e769e31.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-index-template-v1.asciidoc:155

[source, python]
----
resp = client.indices.put_template(
    name="template_1",
    index_patterns=["te*"],
    settings={"number_of_shards": 1},
    aliases={
        "alias1": {},
        "alias2": {
            "filter": {"term": {"user.id": "kimchy"}},
            "routing": "shard-1"
        },
        "{index}-alias": {}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fd08e14ad651827be53897a6bdaf0b8.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/match-bool-prefix-query.asciidoc:13

[source, python]
----
resp = client.search(
    query={"match_bool_prefix": {"message": "quick brown f"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/0fe74ccd098c742619805a7c0bd0fae6.asciidoc
// This file is autogenerated, DO NOT EDIT
// transform/apis/schedule-now-transform.asciidoc:58

[source, python]
----
resp = client.transform.schedule_now_transform(
    transform_id="ecommerce_transform",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/100d4e33158069f3caa32e8bfa0eb3d0.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:175

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "dynamic": "runtime",
        "properties": {"@timestamp": {"type": "date"}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/102c7de25d13c87cf28839ada9f63c95.asciidoc
// This file is autogenerated, DO NOT EDIT
// how-to/search-speed.asciidoc:213

[source, python]
----
resp = client.index(
    index="index",
    id="1",
    document={"my_date": "2016-05-11T16:30:55.328Z"},
)
print(resp)

resp1 = client.search(
    index="index",
    query={
        "constant_score": {
            "filter": {"range": {"my_date": {"gte": "now-1h", "lte": "now"}}}
        }
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/103296e16b4233926ad1f07360385606.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:1794

[source, python]
----
resp = client.indices.create(
    index="turkish_example",
    settings={
        "analysis": {
            "filter": {
                "turkish_stop": {"type": "stop", "stopwords": "_turkish_"},
                "turkish_lowercase": {"type": "lowercase", "language": "turkish"},
                "turkish_keywords": {"type": "keyword_marker", "keywords": ["örnek"]},
                "turkish_stemmer": {"type": "stemmer", "language": "turkish"}
            },
            "analyzer": {
                "rebuilt_turkish": {
                    "tokenizer": "standard",
                    "filter": [
                        "apostrophe",
                        "turkish_lowercase",
                        "turkish_stop",
                        "turkish_keywords",
                        "turkish_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1070e59ba144cdf309fd9b2591612b95.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/refresh.asciidoc:98

[source, python]
----
resp = client.index(
    index="test",
    id="3",
    document={"test": "test"},
)
print(resp)

resp1 = client.index(
    index="test",
    id="4",
    refresh=False,
    document={"test": "test"},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/10796a4efa3c2a5e9e50b6bdeb08bbb9.asciidoc
// This file is autogenerated, DO NOT EDIT
// cluster/update-desired-nodes.asciidoc:80

[source, python]
----
resp = client.perform_request(
    "PUT",
    "/_internal/desired_nodes/Ywkh3INLQcuPT49f6kcppA/100",
    headers={"Content-Type": "application/json"},
    body={
        "nodes": [
            {
                "settings": {
                    "node.name": "instance-000187",
                    "node.external_id": "instance-000187",
                    "node.roles": ["data_hot", "master"],
                    "node.attr.data": "hot",
                    "node.attr.logical_availability_zone": "zone-0"
                },
                "processors": 8,
                "memory": "58gb",
                "storage": "2tb"
            }
        ]
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/109db8ff7b715aca98de8ef1ab7e44ab.asciidoc
// This file is autogenerated, DO NOT EDIT
// ccr/apis/follow/post-resume-follow.asciidoc:43

[source, python]
----
resp = client.ccr.resume_follow(
    index="",
)
print(resp)
----
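Several examples in this batch (the desired-nodes update above among them) go through `client.perform_request`, the low-level escape hatch for endpoints without a typed helper. A sketch (not autogenerated) of the matching read, assuming the documented `GET /_internal/desired_nodes/_latest` endpoint:

[source, python]
----
# Sketch: fetch the latest desired nodes via the raw transport.
# The endpoint path is an assumption based on the desired-nodes API docs.
resp = client.perform_request(
    "GET",
    "/_internal/desired_nodes/_latest",
    headers={"Accept": "application/json"},
)
print(resp)
----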
python-elasticsearch-9.1.1/docs/examples/10a16abe990288253ea25a1b1712fe3d.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/query-user.asciidoc:232

[source, python]
----
resp = client.perform_request(
    "POST",
    "/_security/_query/user",
    params={"with_profile_uid": "true"},
    headers={"Content-Type": "application/json"},
    body={"query": {"prefix": {"roles": "other"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10b924bf6298aa6157ed00ce12f8edc1.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/watcher/execute-watch.asciidoc:369

[source, python]
----
resp = client.watcher.execute_watch(
    ignore_condition=True,
    watch={
        "trigger": {"schedule": {"interval": "10s"}},
        "input": {
            "search": {
                "request": {
                    "indices": ["logs"],
                    "body": {"query": {"match": {"message": "error"}}}
                }
            }
        },
        "condition": {"compare": {"ctx.payload.hits.total": {"gt": 0}}},
        "actions": {
            "log_error": {
                "logging": {"text": "Found {{ctx.payload.hits.total}} errors in the logs"}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10d8b17e73d31dcd907de67327ed78a2.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:578

[source, python]
----
resp = client.indices.create(
    index="dutch_example",
    settings={
        "analysis": {
            "filter": {
                "dutch_stop": {"type": "stop", "stopwords": "_dutch_"},
                "dutch_keywords": {"type": "keyword_marker", "keywords": ["voorbeeld"]},
                "dutch_stemmer": {"type": "stemmer", "language": "dutch"},
                "dutch_override": {
                    "type": "stemmer_override",
                    "rules": [
                        "fiets=>fiets",
                        "bromfiets=>bromfiets",
                        "ei=>eier",
                        "kind=>kinder"
                    ]
                }
            },
            "analyzer": {
                "rebuilt_dutch": {
                    "tokenizer": "standard",
                    "filter": [
                        "lowercase",
                        "dutch_stop",
                        "dutch_keywords",
                        "dutch_override",
                        "dutch_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10d9da8a3b7061479be908c8c5c76cfb.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/get-api-keys.asciidoc:223

[source, python]
----
resp = client.security.get_api_key(
    realm_name="native1",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10de9fd4a38755020a07c4ec964d44c9.asciidoc
// This file is autogenerated, DO NOT EDIT
// security/authentication/oidc-guide.asciidoc:431

[source, python]
----
resp = client.security.put_role_mapping(
    name="oidc-example",
    roles=["example_role"],
    enabled=True,
    rules={"field": {"realm.name": "oidc1"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10e4c1f246ada8c6b500d8ea6c1e335f.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/shingle-tokenfilter.asciidoc:298

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "standard_shingle": {
                    "tokenizer": "standard",
                    "filter": ["shingle"]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10f0c8fed98455c460c374b50ffbb204.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:301

[source, python]
----
resp = client.indices.rollover(
    alias="dsl-data-stream",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/10f7a2c0a952ba3bc3d20b7d5f310f41.asciidoc
// This file is autogenerated, DO NOT EDIT
// search-application/apis/list-search-applications.asciidoc:99

[source, python]
----
resp = client.search_application.list()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/111c31db1fd29baeaa9964eafaea6789.asciidoc
// This file is autogenerated, DO NOT EDIT
// security/authorization/run-as-privilege.asciidoc:184

[source, python]
----
resp = client.security.put_user(
    username="analyst_user",
    refresh=True,
    password="l0nger-r4nd0mer-p@ssw0rd",
    roles=["my_analyst_role"],
    full_name="Monday Jaffe",
    metadata={"innovation": 8},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/111c69ca94162c1523b799a5c14723dd.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/term-query.asciidoc:118

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    document={"full_text": "Quick Brown Foxes!"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1147a02afa087278e51fa365fb9e06b7.asciidoc
// This file is autogenerated, DO NOT EDIT
// api-conventions.asciidoc:355

[source, python]
----
resp = client.search(
    size="1000",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/114d470e752efa9672ca68d7290fada8.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/add-alias.asciidoc:16

[source, python]
----
resp = client.indices.put_alias(
    index="my-data-stream",
    name="my-alias",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1153bd92ca18356db927054958cd95c6.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/function-score-query.asciidoc:269

[source, python]
----
resp = client.search(
    query={
        "function_score": {
            "field_value_factor": {
                "field": "my-int",
                "factor": 1.2,
                "modifier": "sqrt",
                "missing": 1
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/115529722ba30b0b0d51a7ff87e59198.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/get-roles.asciidoc:64

[source, python]
----
resp = client.security.get_role(
    name="my_admin_role",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/118f249a3b26c33416f641b33f2b74f8.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/pattern-tokenizer.asciidoc:128

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {"my_analyzer": {"tokenizer": "my_tokenizer"}},
            "tokenizer": {"my_tokenizer": {"type": "pattern", "pattern": ","}}
        }
    },
)
print(resp)

resp1 = client.indices.analyze(
    index="my-index-000001",
    analyzer="my_analyzer",
    text="comma,separated,values",
)
print(resp1)
----
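The `_analyze` response in the pattern-tokenizer example above returns a list of token objects. A one-line sketch (not autogenerated) for extracting just the token strings, assuming the standard `tokens` response field:

[source, python]
----
# Sketch: resp1 is the _analyze response from the example above.
tokens = [t["token"] for t in resp1["tokens"]]
print(tokens)  # expected: ['comma', 'separated', 'values']
----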
python-elasticsearch-9.1.1/docs/examples/11be807bdeaeecc8174dec88e0851ea7.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/docs/connectors-API-tutorial.asciidoc:437

[source, python]
----
resp = client.perform_request(
    "GET",
    "/_connector/_sync_job",
    params={"connector_id": "my-connector-id", "size": "1"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/11c395d1649733bcab853fe31ec393b2.asciidoc
// This file is autogenerated, DO NOT EDIT
// licensing/get-license.asciidoc:62

[source, python]
----
resp = client.license.get()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/11c43c4aa5435f8a99dcc0d1f03c648f.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/max-aggregation.asciidoc:99

[source, python]
----
resp = client.search(
    index="sales",
    aggs={"grade_max": {"max": {"field": "grade", "missing": 10}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/11d9043d3050a7175069dec7e0adc963.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/regexp-syntax.asciidoc:50

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    document={"my_field": "a\\b"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/11e772ff5dbb73408ae30a1a367a0d9b.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/apis/delete-pipeline.asciidoc:97

[source, python]
----
resp = client.ingest.delete_pipeline(
    id="*",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/11e8d6e14686efabb8634b6522c05cb5.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/highlighting.asciidoc:467

[source, python]
----
resp = client.search(
    query={"match": {"user.id": "kimchy"}},
    highlight={
        "pre_tags": ["", ""],
        "post_tags": ["", ""],
        "fields": {"body": {}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/120fcf9f55128d6a81d5e87a9c235bbd.asciidoc
// This file is autogenerated, DO NOT EDIT
// inference/chat-completion-inference.asciidoc:305

[source, python]
----
resp = client.inference.stream_inference(
    task_type="chat_completion",
    inference_id="openai-completion",
    model="gpt-4o",
    messages=[
        {
            "role": "user",
            "content": "What is Elastic?"
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1233be1d4c9c7ca54126f1a0693b26de.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/parent-join.asciidoc:104

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="3",
    routing="1",
    refresh=True,
    document={
        "my_id": "3",
        "text": "This is an answer",
        "my_join_field": {"name": "answer", "parent": "1"}
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="4",
    routing="1",
    refresh=True,
    document={
        "my_id": "4",
        "text": "This is another answer",
        "my_join_field": {"name": "answer", "parent": "1"}
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/123693835b3b85b9a2fa6fd1d3ad89c7.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/fields/routing-field.asciidoc:20

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    routing="user1",
    refresh=True,
    document={"title": "This is a document"},
)
print(resp)

resp1 = client.get(
    index="my-index-000001",
    id="1",
    routing="user1",
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/12433d2b637d002e8d5c9a1adce69d3b.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-mapping.asciidoc:106

[source, python]
----
resp = client.indices.create(
    index="publications",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1252fa45847edba5ec2b2f33da70ec5b.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/common-options.asciidoc:125

[source, python]
----
resp = client.cluster.state(
    filter_path="routing_table.indices.**.state",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1259a9c151730e42de35bb2d1ba700c6.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/get-mapping.asciidoc:76

[source, python]
----
resp = client.indices.get_mapping(
    index="my-index-000001,my-index-000002",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/128283698535116931dca9d16a16dca2.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/get-roles.asciidoc:99

[source, python]
----
resp = client.security.get_role()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1295f51b9e5d4ba9987b02478146b50b.asciidoc
// This file is autogenerated, DO NOT EDIT
// troubleshooting/common-issues/high-jvm-memory-pressure.asciidoc:76

[source, python]
----
resp = client.indices.put_settings(
    settings={"index.max_result_window": 5000},
)
print(resp)

resp1 = client.cluster.put_settings(
    persistent={
        "search.max_buckets": 20000,
        "search.allow_expensive_queries": False
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/12adea5d76f73d94d80d42f53f67563f.asciidoc
// This file is autogenerated, DO NOT EDIT
// migration/migrate_9_0.asciidoc:393

[source, python]
----
resp = client.indices.add_block(
    index=".ml-anomalies-custom-example",
    block="read_only",
)
print(resp)
----
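`indices.add_block` above works by setting the corresponding `index.blocks.*` index setting. A sketch (not autogenerated) of lifting the block again afterwards, assuming `index.blocks.read_only` is the setting behind the `read_only` block:

[source, python]
----
# Sketch: clear the read_only block set by the add_block call above.
resp = client.indices.put_settings(
    index=".ml-anomalies-custom-example",
    settings={"index.blocks.read_only": False},
)
print(resp)
----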
python-elasticsearch-9.1.1/docs/examples/12cb446446211f95f651e196a1f059b4.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/take-snapshot.asciidoc:302

[source, python]
----
resp = client.snapshot.create(
    repository="my_repository",
    snapshot="my_snapshot",
    wait_for_completion=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/12d5ff4b8d3d832b32a7e7e2a520d0bb.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/get-calendar-event.asciidoc:162

[source, python]
----
resp = client.ml.get_calendar_events(
    calendar_id="planned-outages",
    start="1635638400000",
    end="1635724800000",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/12e9e758f7f18a6cbf27e9d0aea57a19.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/docs/connectors-managed-service.asciidoc:167

[source, python]
----
resp = client.update(
    index=".elastic-connectors",
    id="connector_id",
    doc={"features": {"native_connector_api_keys": {"enabled": True}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/12ec704d62ffedcb03787e6aba69d382.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/shingle-tokenfilter.asciidoc:374

[source, python]
----
resp = client.indices.analyze(
    tokenizer="whitespace",
    filter=[
        {"type": "stop", "stopwords": ["a"]},
        {"type": "shingle", "filler_token": "+"}
    ],
    text="fox jumps a lazy dog",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/12facf3617a41551ce2f0c4d005cb1c7.asciidoc
// This file is autogenerated, DO NOT EDIT
// how-to/search-speed.asciidoc:82

[source, python]
----
resp = client.indices.create(
    index="movies",
    mappings={
        "properties": {
            "name_and_plot": {"type": "text"},
            "name": {"type": "text", "copy_to": "name_and_plot"},
            "plot": {"type": "text", "copy_to": "name_and_plot"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1302e24b0476e0e9af7a2c890edf9f62.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-api.asciidoc:406

[source, python]
----
resp = client.search(
    index="my-index-000001",
    track_total_hits=False,
    query={"match": {"user.id": "elkbee"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1313c540fef7e7c18a066f07789673fc.asciidoc
// This file is autogenerated, DO NOT EDIT
// sql/endpoints/rest.asciidoc:673

[source, python]
----
resp = client.sql.get_async(
    id="FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=",
    keep_alive="5d",
    wait_for_completion_timeout="2s",
    format="json",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/132ea3d5a0ffb6b5203e356e8329f679.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/composite-aggregation.asciidoc:315

[source, python]
----
resp = client.search(
    size=0,
    aggs={
        "my_buckets": {
            "composite": {
                "sources": [
                    {
                        "date": {
                            "date_histogram": {
                                "field": "timestamp",
"calendar_interval": "1d" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/134384b8c63cfbd8d762fb01757bb3f9.asciidoc000066400000000000000000000007161506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/constant-keyword.asciidoc:40 [source, python] ---- resp = client.index( index="logs-debug", document={ "date": "2019-12-12", "message": "Starting up Elasticsearch", "level": "debug" }, ) print(resp) resp1 = client.index( index="logs-debug", document={ "date": "2019-12-12", "message": "Starting up Elasticsearch" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/135819da3a4bde684357c57a49ad8e85.asciidoc000066400000000000000000000002441506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/deprecation.asciidoc:67 [source, python] ---- resp = client.migration.deprecations() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13670d1534125831c2059eebd86d840c.asciidoc000066400000000000000000000021461506101734100261730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:283 [source, python] ---- resp = client.indices.create( index="brazilian_example", settings={ "analysis": { "filter": { "brazilian_stop": { "type": "stop", "stopwords": "_brazilian_" }, "brazilian_keywords": { "type": "keyword_marker", "keywords": [ "exemplo" ] }, "brazilian_stemmer": { "type": "stemmer", "language": "brazilian" } }, "analyzer": { "rebuilt_brazilian": { "tokenizer": "standard", "filter": [ "lowercase", "brazilian_stop", "brazilian_keywords", "brazilian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/136ae86b8d497dda799cf1cb583df929.asciidoc000066400000000000000000000013121506101734100266120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:80 [source, python] ---- resp = client.indices.create( index="publications", mappings={ "properties": { "id": { "type": "text" }, "title": { "type": "text" }, "abstract": { "type": "text" }, "author": { "properties": { "id": { "type": "text" }, "name": { "type": "text" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/137709a0a0dc38d6094291c9fc75b804.asciidoc000066400000000000000000000004151506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:348 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "counter": 1, "tags": [ "production" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/137c62a4443bdd7d5b95a15022a9dc30.asciidoc000066400000000000000000000022411506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:86 [source, python] ---- resp = client.indices.create( index="arabic_example", settings={ "analysis": { "filter": { "arabic_stop": { "type": "stop", "stopwords": "_arabic_" }, "arabic_keywords": { "type": "keyword_marker", "keywords": [ "مثال" ] }, "arabic_stemmer": { "type": "stemmer", "language": "arabic" } }, "analyzer": { "rebuilt_arabic": { "tokenizer": "standard", "filter": [ "lowercase", "decimal_digit", "arabic_stop", "arabic_normalization", "arabic_keywords", "arabic_stemmer" ] } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/138f7703c47ddf63633fdf5ca9bc7fa4.asciidoc000066400000000000000000000004341506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:391 [source, python] ---- resp = client.index( index="my-index-000001", id="2", routing="user1", document={ "counter": 1, "tags": [ "env2" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13917f7cfb6a382c293275ff71134ec4.asciidoc000066400000000000000000000007241506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:947 [source, python] ---- resp = client.render_search_template( source={ "query": { "match": { "message": "Hello {{#name_exists}}{{query_string}}{{/name_exists}}{{^name_exists}}World{{/name_exists}}" } } }, params={ "query_string": "Kimchy", "name_exists": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13b02da42d3afe7f0b649e1c98ac9549.asciidoc000066400000000000000000000013141506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keep-types-tokenfilter.asciidoc:185 [source, python] ---- resp = client.indices.create( index="keep_types_example", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "extract_alpha" ] } }, "filter": { "extract_alpha": { "type": "keep_types", "types": [ "<ALPHANUM>" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13cc51ca3a783cdbb1f1d353eaedbf23.asciidoc000066400000000000000000000003731506101734100270310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/troubleshooting.asciidoc:114 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.xpack.security.authc": "debug" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13d90ba227131aefbf4fcfd5992e662a.asciidoc000066400000000000000000000017031506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/bool-query.asciidoc:159 [source, python] ---- resp = client.search( query={ "bool": { "should": [ { "match": { "name.first": { "query": "shay", "_name": "first" } } }, { "match": { "name.last": { "query": "banon", "_name": "last" } } } ], "filter": { "terms": { "name.last": [ "banon", "kimchy" ], "_name": "test" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13d91782399ba1f291e103c18b5338cc.asciidoc000066400000000000000000000010241506101734100262510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/create-index-from-source.asciidoc:94 [source, python] ---- resp = client.indices.create_from( source="my-index", dest="my-new-index", create_from={ "settings_override": { "index": { "number_of_shards": 5 } }, "mappings_override": { "properties": { "field2": { "type": "boolean" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13df08eefc9ba98e311793bbca74133b.asciidoc000066400000000000000000000004021506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-user-profile.asciidoc:115 [source, python] ---- resp = client.security.get_user_profile( uid="u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", data="app1.key1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13e3fefbf55f672926aa389d76fc8bea.asciidoc000066400000000000000000000004031506101734100267400ustar00rootroot00000000000000// This 
file is autogenerated, DO NOT EDIT // security/securing-communications/change-passwords-native-users.asciidoc:42 [source, python] ---- resp = client.security.change_password( username="user1", password="new-test-password", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13ebcb01ebf1b5d2b5c52739db47e30c.asciidoc000066400000000000000000000003011506101734100266640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/recovery.asciidoc:185 [source, python] ---- resp = client.indices.recovery( index="index1,index2", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13ecdf99114098c76b050397d9c3d4e6.asciidoc000066400000000000000000000004771506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/post-inference.asciidoc:202 [source, python] ---- resp = client.inference.inference( task_type="sparse_embedding", inference_id="my-elser-model", input="The sky above the port was the color of television tuned to a dead channel.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/13fe12cdb73bc89f07a83f1e6b127511.asciidoc000066400000000000000000000010421506101734100264640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:208 [source, python] ---- resp = client.indices.create( index="google-vertex-ai-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 768, "element_type": "float", "similarity": "dot_product" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/141ef0ebaa3b0772892b79b9bb85efb0.asciidoc000066400000000000000000000004621506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/update-inference.asciidoc:83 [source, python] ---- resp = client.inference.update( inference_id="my-inference-endpoint", inference_config={ "service_settings": { "api_key": "<API_KEY>" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14254a0e725044faedf9370ead76f6ce.asciidoc000066400000000000000000000003351506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-api.asciidoc:465 [source, python] ---- resp = client.search( q="user.id:elkbee", size="0", terminate_after="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/142de21c40e84e2e2d8d832e5b3b36db.asciidoc000066400000000000000000000003221506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/migrate-to-data-tiers-routing-guide.asciidoc:175 [source, python] ---- resp = client.ilm.migrate_to_data_tiers() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1445ca2e813ed1c25504107b4b11760e.asciidoc000066400000000000000000000004301506101734100262210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/getting-started.asciidoc:205 [source, python] ---- resp = client.ccr.follow( index="server-metrics-follower", wait_for_active_shards="1", remote_cluster="leader", leader_index="server-metrics", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1452829804551d2d6acedd4e73b29637.asciidoc000066400000000000000000000003221506101734100262620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/ignore-missing-component-templates.asciidoc:62 [source, python] ---- resp = client.indices.create_data_stream( name="logs-foo-bar", ) print(resp) 
---- python-elasticsearch-9.1.1/docs/examples/146bd22fd0e7be2345619e8f11d3a4cb.asciidoc000066400000000000000000000003721506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:253 [source, python] ---- resp = client.cat.tasks( v=True, s="time:desc", h="type,action,running_time,node,cancellable", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/147d341cb212dcc015c129a9c5dcf9c9.asciidoc000066400000000000000000000004341506101734100265440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/put-trained-models-aliases.asciidoc:87 [source, python] ---- resp = client.ml.put_trained_model_alias( model_id="flight-delay-prediction-1574775339910", model_alias="flight_delay_model", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/148edc235fcfbc263561f87f5533e688.asciidoc000066400000000000000000000011601506101734100264400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:196 [source, python] ---- resp = client.search( index="my-index-000001", query={ "percolate": { "field": "query", "documents": [ { "message": "bonsai tree" }, { "message": "new tree" }, { "message": "the office" }, { "message": "office tree" } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14936b96cfb8ff999a833f615ba75495.asciidoc000066400000000000000000000007341506101734100264060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:518 [source, python] ---- resp = client.search( index="bicycles,other_cycles", query={ "bool": { "must": { "match": { "description": "dutch" } }, "filter": { "term": { "cycle_type": "bicycle" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/149a0eea54cdf6ea3052af6dba2d2a63.asciidoc000066400000000000000000000006401506101734100267520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-set-priority.asciidoc:29 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "set_priority": { "priority": 50 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14a1db30e13eb1d03cfd9710ca847ebb.asciidoc000066400000000000000000000012601506101734100266630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc:65 [source, python] ---- resp = client.bulk( index="my-data-stream", operations=[ { "create": {} }, { "@timestamp": "2099-05-06T16:21:15.000Z", "message": "192.0.2.42 - - [06/May/2099:16:21:15 +0000] \"GET /images/bg.jpg HTTP/1.0\" 200 24736" }, { "create": {} }, { "@timestamp": "2099-05-06T16:25:42.000Z", "message": "192.0.2.255 - - [06/May/2099:16:25:42 +0000] \"GET /favicon.ico HTTP/1.0\" 200 3638" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14a33c364873c2f930ca83d0a3005389.asciidoc000066400000000000000000000003661506101734100261700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/disk-usage-exceeded.asciidoc:46 [source, python] ---- resp = client.cluster.allocation_explain( index="my-index", shard=0, primary=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14af7e2899e64f231068bded6aaf9ec5.asciidoc000066400000000000000000000007301506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
mapping/params/dynamic.asciidoc:27 [source, python] ---- resp = client.index( index="my-index-000001", id="2", document={ "username": "marywhite", "email": "mary@white.com", "name": { "first": "Mary", "middle": "Alice", "last": "White" } }, ) print(resp) resp1 = client.indices.get_mapping( index="my-index-000001", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/14afe65afee3d43f27aaaa5b37f26a31.asciidoc000066400000000000000000000005321506101734100267600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:164 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "Point", "coordinates": [ -77.03653, 38.897676 ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14b81f96297952970b78a3216e059596.asciidoc000066400000000000000000000003501506101734100260100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/async-search.asciidoc:159 [source, python] ---- resp = client.async_search.get( id="FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14f124294a4a0e3a657d1468c36161cd.asciidoc000066400000000000000000000004621506101734100262440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/aggregate-metric-double.asciidoc:205 [source, python] ---- resp = client.search( index="stats-index", query={ "term": { "agg_metric": { "value": 702.3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/14f2dab0583c5a9fcc39931d33194872.asciidoc000066400000000000000000000002661506101734100263460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:296 [source, python] ---- resp = client.search( index="sample_weblogs_by_clientip", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/150b5fee5678bf8cdf0932da73eada80.asciidoc000066400000000000000000000011731506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-stats.asciidoc:2556 [source, python] ---- resp = client.nodes.stats( metric="indices", index_metric="fielddata", fields="field1,field2", ) print(resp) resp1 = client.nodes.stats( metric="indices", index_metric="fielddata", level="indices", fields="field1,field2", ) print(resp1) resp2 = client.nodes.stats( metric="indices", index_metric="fielddata", level="shards", fields="field1,field2", ) print(resp2) resp3 = client.nodes.stats( metric="indices", index_metric="fielddata", fields="field*", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/151d2b11807ec684b0c01aa89189a801.asciidoc000066400000000000000000000005661506101734100262440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:474 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "title", "content" ], "query": "this that thus", "minimum_should_match": 2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1522a9297151d7046e6345b9b27539ca.asciidoc000066400000000000000000000007011506101734100261200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:340 [source, python] ---- resp = client.connector.update_configuration( connector_id="my-connector-id", values={ "host": "127.0.0.1", "port": 5432, "username": "myuser", "password": "mypassword", "database": "chinook", "schema": "public", "tables": 
"album,artist" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/154d703732daf5c5fcd0122e6a50213f.asciidoc000066400000000000000000000005131506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:339 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", runtime={ "measures.start": { "type": "long" }, "measures.end": { "type": "long" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/156bc64c94f9f3334fbce25165d2286a.asciidoc000066400000000000000000000006541506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/index-sorting.asciidoc:15 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "sort.field": "date", "sort.order": "desc" } }, mappings={ "properties": { "date": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1570976f7807b88dc8a046b833be057b.asciidoc000066400000000000000000000004121506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:34 [source, python] ---- resp = client.cat.nodes( v=True, s="master,name", h="name,master,node.role,heap.percent,disk.used_percent,cpu", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1572696b97822d3332be51700e09672f.asciidoc000066400000000000000000000005611506101734100260500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:130 [source, python] ---- resp = client.search( index="range_index", query={ "range": { "time_frame": { "gte": "2015-10-31", "lte": "2015-11-01", "relation": "within" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1598a0fec6b1ca78cadbaba65f465196.asciidoc000066400000000000000000000014171506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pattern-tokenizer.asciidoc:216 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "pattern", "pattern": "\"((?:\\\\\"|[^\"]|\\\\\")+)\"", "group": 1 } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="\"value\", \"value with embedded \\\" quote\"", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/15a34bfe0ef8ef6333c8c7b55c011e5d.asciidoc000066400000000000000000000003141506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:275 [source, python] ---- resp = client.indices.analyze( filter=[ "lowercase" ], text="BaR", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/15ac33d641b376d9494075eb1f0d4066.asciidoc000066400000000000000000000003221506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex.asciidoc:224 [source, python] ---- resp = client.indices.cancel_migrate_reindex( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/15c76cc8a038f686395053a240262929.asciidoc000066400000000000000000000007441506101734100260530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/classic-tokenfilter.asciidoc:132 [source, python] ---- resp = client.indices.create( index="classic_example", settings={ 
"analysis": { "analyzer": { "classic_analyzer": { "tokenizer": "classic", "filter": [ "classic" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/15d4be58359542775f4aff88e6d8adb5.asciidoc000066400000000000000000000005761506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:135 [source, python] ---- resp = client.ingest.simulate( id="my-pipeline", docs=[ { "_source": { "my-keyword-field": "FOO" } }, { "_source": { "my-keyword-field": "BAR" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/15d948d593d2624ac5e2b155052048f0.asciidoc000066400000000000000000000004531506101734100261740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc:24 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "keyword_repeat", "stemmer" ], text="jumping dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/15e90b82827c8512670820cf856a9c71.asciidoc000066400000000000000000000007061506101734100261350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/date-index-name.asciidoc:23 [source, python] ---- resp = client.ingest.put_pipeline( id="monthlyindex", description="monthly date-time index naming", processors=[ { "date_index_name": { "field": "date1", "index_name_prefix": "my-index-", "date_rounding": "M" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/15f769bbd7b5fddeb3353ae726b71b14.asciidoc000066400000000000000000000033151506101734100266410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:405 [source, python] ---- resp = client.search( index="my-index-bit-vectors", query={ "script_score": { "query": { "match_all": {} }, "script": { "source": "dotProduct(params.query_vector, 'my_dense_vector')", "params": { "query_vector": [ 0.23, 1.45, 3.67, 4.89, -0.56, 2.34, 3.21, 1.78, -2.45, 0.98, -0.12, 3.45, 4.56, 2.78, 1.23, 0.67, 3.89, 4.12, -2.34, 1.56, 0.78, 3.21, 4.12, 2.45, -1.67, 0.34, -3.45, 4.56, -2.78, 1.23, -0.67, 3.89, -4.34, 2.12, -1.56, 0.78, -3.21, 4.45, 2.12, 1.67 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1605be45a5711d1929d6ad2d1ae0f797.asciidoc000066400000000000000000000003411506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/discovery/voting.asciidoc:26 [source, python] ---- resp = client.cluster.state( filter_path="metadata.cluster_coordination.last_committed_config", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/160de80948e0c7db49b1c311848a66a2.asciidoc000066400000000000000000000016521506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/getting-started.asciidoc:161 [source, python] ---- resp = client.watcher.put_watch( id="log_error_watch", trigger={ "schedule": { "interval": "10s" } }, input={ "search": { "request": { "indices": [ "logs" ], "body": { "query": { "match": { "message": "error" } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 0 } } }, actions={ "log_error": { "logging": { "text": "Found {{ctx.payload.hits.total}} errors in the logs" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/160f39a50847bad0be4be1529a95e4ce.asciidoc000066400000000000000000000035171506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
analysis/analyzers/lang-analyzer.asciidoc:1140 [source, python] ---- resp = client.indices.create( index="irish_example", settings={ "analysis": { "filter": { "irish_hyphenation": { "type": "stop", "stopwords": [ "h", "n", "t" ], "ignore_case": True }, "irish_elision": { "type": "elision", "articles": [ "d", "m", "b" ], "articles_case": True }, "irish_stop": { "type": "stop", "stopwords": "_irish_" }, "irish_lowercase": { "type": "lowercase", "language": "irish" }, "irish_keywords": { "type": "keyword_marker", "keywords": [ "sampla" ] }, "irish_stemmer": { "type": "stemmer", "language": "irish" } }, "analyzer": { "rebuilt_irish": { "tokenizer": "standard", "filter": [ "irish_hyphenation", "irish_elision", "irish_lowercase", "irish_stop", "irish_keywords", "irish_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/16239fe9f0b0dcfd5ea64c08c6fed21d.asciidoc000066400000000000000000000012011506101734100267710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/reverse-nested-aggregation.asciidoc:22 [source, python] ---- resp = client.indices.create( index="issues", mappings={ "properties": { "tags": { "type": "keyword" }, "comments": { "type": "nested", "properties": { "username": { "type": "keyword" }, "comment": { "type": "text" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/162b5b693b713f0bfab1209d59443c46.asciidoc000066400000000000000000000005021506101734100263230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/bool-query.asciidoc:133 [source, python] ---- resp = client.search( query={ "constant_score": { "filter": { "term": { "status": "active" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/16351d99d0608789d04a0bb11a537098.asciidoc000066400000000000000000000007601506101734100261210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/edgengram-tokenfilter.asciidoc:143 [source, python] ---- resp = client.indices.create( index="edge_ngram_example", settings={ "analysis": { "analyzer": { "standard_edge_ngram": { "tokenizer": "standard", "filter": [ "edge_ngram" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1637ef51d673b35cc8894ee80cd61c87.asciidoc000066400000000000000000000003101506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/high-cpu-usage.asciidoc:28 [source, python] ---- resp = client.cat.nodes( v=True, s="cpu:desc", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1648dd31d0fef01e7504ebeb687f4f30.asciidoc000066400000000000000000000023331506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rank-feature-query.asciidoc:92 [source, python] ---- resp = client.index( index="test", id="1", refresh=True, document={ "url": "https://en.wikipedia.org/wiki/2016_Summer_Olympics", "content": "Rio 2016", "pagerank": 50.3, "url_length": 42, "topics": { "sports": 50, "brazil": 30 } }, ) print(resp) resp1 = client.index( index="test", id="2", refresh=True, document={ "url": "https://en.wikipedia.org/wiki/2016_Brazilian_Grand_Prix", "content": "Formula One motor race held on 13 November 2016", "pagerank": 50.3, "url_length": 47, "topics": { "sports": 35, "formula one": 65, "brazil": 20 } }, ) print(resp1) resp2 = client.index( index="test", id="3", refresh=True, document={ "url": "https://en.wikipedia.org/wiki/Deadpool_(film)", "content": "Deadpool is a 2016 
American superhero film", "pagerank": 50.3, "url_length": 37, "topics": { "movies": 60, "super hero": 65 } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/16535685833419f0033545ffce4fdf00.asciidoc000066400000000000000000000011561506101734100262060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:372 [source, python] ---- resp = client.search( index="index2", query={ "query_string": { "query": "running with scissors", "fields": [ "comment", "comment.english" ] } }, highlight={ "order": "score", "fields": { "comment.english": { "type": "fvh", "matched_fields": [ "comment" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1659420311d907d9fc024b96f4150216.asciidoc000066400000000000000000000005611506101734100260320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/length-tokenfilter.asciidoc:27 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "length", "min": 0, "max": 4 } ], text="the quick brown fox jumps over the lazy dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/16634cfa7916cf4e8048a1d70e6240f2.asciidoc000066400000000000000000000027501506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-client.asciidoc:427 [source, python] ---- resp = client.search_application.put( name="my-example-app", search_application={ "indices": [ "example-index" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"_source\": {\n \"includes\": [\"title\", \"plot\"]\n },\n \"highlight\": {\n \"fields\": {\n \"title\": { \"fragment_size\": 0 },\n \"plot\": { \"fragment_size\": 200 }\n }\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", "params": { "query": "", "_es_filters": {}, "_es_aggs": {}, "_es_sort_fields": {}, "size": 10, "from": 0 }, "dictionary": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/166bcfc6d5d39defec7ad6aa44d0914b.asciidoc000066400000000000000000000004641506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:80 [source, python] ---- resp = client.tasks.list() print(resp) resp1 = client.tasks.list( nodes="nodeId1,nodeId2", ) print(resp1) resp2 = client.tasks.list( nodes="nodeId1,nodeId2", actions="cluster:*", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/16985e5b17d2da0955a14fbe02e8dfca.asciidoc000066400000000000000000000005131506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:243 [source, python] ---- resp = client.termvectors( index="my-index-000001", id="1", fields=[ "text" ], offsets=True, payloads=True, positions=True, term_statistics=True, field_statistics=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/169b39bb889ecd47541bed3e48725488.asciidoc000066400000000000000000000003711506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/flattened.asciidoc:73 [source, python] ---- resp = client.search( index="bug_reports", query={ "term": { "labels": "urgent" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/16a7ce08b4a6b3af269f27eecc71d664.asciidoc000066400000000000000000000004101506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:546 [source, python] ---- resp = client.indices.delete( index="books", ) print(resp) resp1 = client.indices.delete( index="my-explicit-mappings-books", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/170c8a3fb81a4e93cd3034a3b5a43ac9.asciidoc000066400000000000000000000007501506101734100265370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-shape-query.asciidoc:280 [source, python] ---- resp = client.index( index="test", id="1", document={ "location": { "coordinates": [ [ 46.25, 20.14 ], [ 47.49, 19.04 ] ], "type": "multipoint" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/172155ca4bf6dfcbd489453f50739396.asciidoc000066400000000000000000000004011506101734100263500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:408 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="snapshot*", size="2", sort="name", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17266cee5eaaddf08e5534bf580a1910.asciidoc000066400000000000000000000002271506101734100265530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/stats.asciidoc:90 [source, python] ---- resp = client.watcher.stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/172b18e435c400bed85227624de3acfd.asciidoc000066400000000000000000000013041506101734100264620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/run-as-privilege.asciidoc:143 [source, python] ---- resp = client.security.put_role( name="my_analyst_role", refresh=True, cluster=[ "monitor" ], indices=[ { "names": [ "index1", "index2" ], "privileges": [ "manage" ] } ], applications=[ { "application": "myapp", "privileges": [ "read" ], "resources": [ "*" ] } ], metadata={ "version": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/172d150e56a225155a62c7b18bf8da67.asciidoc000066400000000000000000000004651506101734100263340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:502 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT YEAR(release_date) AS year FROM library WHERE page_count > 300 AND author = 'Frank Herbert' GROUP BY year HAVING COUNT(*) > 0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17316a81c9dbdd120b7754116bf0461c.asciidoc000066400000000000000000000014701506101734100263200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/_connectors-create-native-api-key.asciidoc:12 [source, python] ---- resp = client.security.create_api_key( name="my-connector-api-key", role_descriptors={ "my-connector-connector-role": { "cluster": [ "monitor", "manage_connector" ], "indices": [ { "names": [ "my-index_name", ".search-acl-filter-my-index_name", ".elastic-connectors*" ], "privileges": [ "all" ], "allow_restricted_indices": False } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1736545c8b5674f6d311f3277eb387f1.asciidoc000066400000000000000000000004011506101734100262110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc:131 
[source, python] ---- resp = client.indices.put_data_lifecycle( name="my-data-stream", data_retention="30d", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/173b190078621415a80e851eaf794e8a.asciidoc000066400000000000000000000012241506101734100262000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/standard-analyzer.asciidoc:154 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_english_analyzer": { "type": "standard", "max_token_length": 5, "stopwords": "_english_" } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_english_analyzer", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/174b93c323aa8e9cc8ee2a3df5736810.asciidoc000066400000000000000000000026161506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/delegate-pki-authentication.asciidoc:83 [source, python] ---- resp = client.security.delegate_pki( x509_certificate_chain=[ "MIIDeDCCAmCgAwIBAgIUBzj/nGGKxP2iXawsSquHmQjCJmMwDQYJKoZIhvcNAQELBQAwUzErMCkGA1UEAxMiRWxhc3RpY3NlYXJjaCBUZXN0IEludGVybWVkaWF0ZSBDQTEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMB4XDTIzMDcxODE5MjkwNloXDTQzMDcxMzE5MjkwNlowSjEiMCAGA1UEAxMZRWxhc3RpY3NlYXJjaCBUZXN0IENsaWVudDEWMBQGA1UECxMNRWxhc3RpY3NlYXJjaDEMMAoGA1UEChMDb3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAllHL4pQkkfwAm/oLkxYYO+r950DEy1bjH+4viCHzNADLCTWO+lOZJVlNx7QEzJE3QGMdif9CCBBxQFMapA7oUFCLq84fPSQQu5AnvvbltVD9nwVtCs+9ZGDjMKsz98RhSLMFIkxdxi6HkQ3Lfa4ZSI4lvba4oo+T/GveazBDS+NgmKyq00EOXt3tWi1G9vEVItommzXWfv0agJWzVnLMldwkPqsw0W7zrpyT7FZS4iLbQADGceOW8fiauOGMkscu9zAnDR/SbWl/chYioQOdw6ndFLn1YIFPd37xL0WsdsldTpn0vH3YfzgLMffT/3P6YlwBegWzsx6FnM/93Ecb4wIDAQABo00wSzAJBgNVHRMEAjAAMB0GA1UdDgQWBBQKNRwjW+Ad/FN1Rpoqme/5+jrFWzAfBgNVHSMEGDAWgBRcya0c0x/PaI7MbmJVIylWgLqXNjANBgkqhkiG9w0BAQsFAAOCAQEACZ3PF7Uqu47lplXHP6YlzYL2jL0D28hpj5lGtdha4Muw1m/BjDb0Pu8l0NQ1z3AP6AVcvjNDkQq6Y5jeSz0bwQlealQpYfo7EMXjOidrft1GbqOMFmTBLpLA9SvwYGobSTXWTkJzonqVaTcf80HpMgM2uEhodwTcvz6v1WEfeT/HMjmdIsq4ImrOL9RNrcZG6nWfw0HR3JNOgrbfyEztEI471jHznZ336OEcyX7gQuvHE8tOv5+oD1d7s3Xg1yuFp+Ynh+FfOi3hPCuaHA+7F6fLmzMDLVUBAllugst1C3U+L/paD7tqIa4ka+KNPCbSfwazmJrt4XNiivPR4hwH5g==" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17566e23c191f1004a2719f2c4242307.asciidoc000066400000000000000000000003021506101734100260120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/get-autoscaling-capacity.asciidoc:268 [source, python] ---- resp = client.autoscaling.get_autoscaling_capacity() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/178be73b74ba9f297429e32267084ac7.asciidoc000066400000000000000000000012021506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-or-query.asciidoc:10 [source, python] ---- resp = client.search( query={ "span_or": { "clauses": [ { "span_term": { "field": "value1" } }, { "span_term": { "field": "value2" } }, { "span_term": { "field": "value3" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/178c920d5e8ec0071f77290fa059802c.asciidoc000066400000000000000000000004261506101734100262650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/update-settings.asciidoc:138 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "refresh_interval": 
"1s" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/179f0a3e84ff4bbac18787a018eabf89.asciidoc000066400000000000000000000006411506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:482 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "Jon", "type": "cross_fields", "analyzer": "standard", "fields": [ "first", "last", "*.edge" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17a1e308761afd3282f13d44d7be008a.asciidoc000066400000000000000000000005641506101734100264060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:699 [source, python] ---- resp = client.indices.create( index="example", mappings={ "properties": { "comment": { "type": "text", "term_vector": "with_positions_offsets" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17b1647c8509543f2388c886f2584a20.asciidoc000066400000000000000000000013131506101734100260550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // reranking/semantic-reranking.asciidoc:107 [source, python] ---- resp = client.search( retriever={ "text_similarity_reranker": { "retriever": { "standard": { "query": { "match": { "text": "How often does the moon hide the sun?" } } } }, "field": "text", "inference_id": "elastic-rerank", "inference_text": "How often does the moon hide the sun?", "rank_window_size": 100, "min_score": 0.5 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17c2b0a6b0305804ff3b7fd3b4a68df3.asciidoc000066400000000000000000000013101506101734100265340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/simulate-pipeline.asciidoc:223 [source, python] ---- resp = client.ingest.simulate( pipeline={ "description": "_description", "processors": [ { "set": { "field": "field2", "value": "_value" } } ] }, docs=[ { "_index": "index", "_id": "id", "_source": { "foo": "bar" } }, { "_index": "index", "_id": "id", "_source": { "foo": "rab" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17dd67a66c49f7eb618dd17430e48dfa.asciidoc000066400000000000000000000006671506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:239 [source, python] ---- resp = client.search( index="index", query={ "constant_score": { "filter": { "range": { "my_date": { "gte": "now-1h/m", "lte": "now/m" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17e6f3fac556f08a78f7a876e71acb89.asciidoc000066400000000000000000000004601506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/delayed.asciidoc:40 [source, python] ---- resp = client.indices.put_settings( index="_all", settings={ "settings": { "index.unassigned.node_left.delayed_timeout": "5m" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17f8a8990b0166befa3bc2b10fd28134.asciidoc000066400000000000000000000004761506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:40 [source, python] ---- resp = client.index( index="my-index-000001", id="match_value", document={ "query": { "match": { "field": "value" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/17fb298fb1e47f7d946a772d68f4e2df.asciidoc000066400000000000000000000004211506101734100266110ustar00rootroot00000000000000// This 
file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:246 [source, python] ---- resp = client.delete_by_query( index="my-data-stream", query={ "match": { "user.id": "vlb44hny" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/182df084f028479ecbe8d7648ddad892.asciidoc000066400000000000000000000002541506101734100265300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/start-ilm.asciidoc:84 [source, python] ---- resp = client.ilm.get_status() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/186a7143d50e8c3ee01094e1a9ff0c0c.asciidoc000066400000000000000000000016131506101734100264640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:659 [source, python] ---- resp = client.indices.create( index="passage_vectors", mappings={ "properties": { "full_text": { "type": "text" }, "creation_time": { "type": "date" }, "paragraph": { "type": "nested", "properties": { "vector": { "type": "dense_vector", "dims": 2, "index_options": { "type": "hnsw" } }, "text": { "type": "text", "index": False } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/187733e50c60350f3f75921bea3b72c2.asciidoc000066400000000000000000000005731506101734100262550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:615 [source, python] ---- resp = client.search( index="my-index-000001", scroll="1m", slice={ "field": "@timestamp", "id": 0, "max": 10 }, query={ "match": { "message": "foo" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/187e8786e0a90f1f6278cf89b670de0a.asciidoc000066400000000000000000000021771506101734100264550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:891 [source, python] ---- resp = client.indices.create( index="german_example", settings={ "analysis": { "filter": { "german_stop": { "type": "stop", "stopwords": "_german_" }, "german_keywords": { "type": "keyword_marker", "keywords": [ "Beispiel" ] }, "german_stemmer": { "type": "stemmer", "language": "light_german" } }, "analyzer": { "rebuilt_german": { "tokenizer": "standard", "filter": [ "lowercase", "german_stop", "german_keywords", "german_normalization", "german_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/188e6208cccb13027a5c1c95440841ee.asciidoc000066400000000000000000000022741506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/filters-aggregation.asciidoc:13 [source, python] ---- resp = client.bulk( index="logs", refresh=True, operations=[ { "index": { "_id": 1 } }, { "body": "warning: page could not be rendered" }, { "index": { "_id": 2 } }, { "body": "authentication error" }, { "index": { "_id": 3 } }, { "body": "warning: connection timed out" } ], ) print(resp) resp1 = client.search( index="logs", size=0, aggs={ "messages": { "filters": { "filters": { "errors": { "match": { "body": "error" } }, "warnings": { "match": { "body": "warning" } } } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/189f0cd1ee2485cf11a2968f01d54e5b.asciidoc000066400000000000000000000013571506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/derivative-aggregation.asciidoc:235 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ 
"sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } }, "sales_deriv": { "derivative": { "buckets_path": "sales", "unit": "day" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/18ddb7e7a4bcafd449df956e828ed7a8.asciidoc000066400000000000000000000003021506101734100270230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:552 [source, python] ---- resp = client.tasks.cancel( task_id="r1A2WoRbTwKZ516z6NEs5A:36619", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/190a21e32db2125ddaea0f634e126a84.asciidoc000066400000000000000000000003201506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/clone-index.asciidoc:97 [source, python] ---- resp = client.indices.clone( index="my_source_index", target="my_target_index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/19174d872fd1e43cbfb7a96a33d13c96.asciidoc000066400000000000000000000035331506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cartesian-centroid-aggregation.asciidoc:183 [source, python] ---- resp = client.indices.create( index="places", mappings={ "properties": { "geometry": { "type": "shape" } } }, ) print(resp) resp1 = client.bulk( index="places", refresh=True, operations=[ { "index": { "_id": 1 } }, { "name": "NEMO Science Museum", "geometry": "POINT(491.2350 5237.4081)" }, { "index": { "_id": 2 } }, { "name": "Sportpark De Weeren", "geometry": { "type": "Polygon", "coordinates": [ [ [ 496.5305328369141, 5239.347642069457 ], [ 496.6979026794433, 5239.172175893484 ], [ 496.9425201416015, 5239.238958618537 ], [ 496.7944622039794, 5239.420969150824 ], [ 496.5305328369141, 5239.347642069457 ] ] ] } } ], ) print(resp1) resp2 = client.search( index="places", size="0", aggs={ "centroid": { "cartesian_centroid": { "field": "geometry" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/192fa1f6f51dfb640e9e15bb5cd7eebc.asciidoc000066400000000000000000000002561506101734100270620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/error-handling.asciidoc:148 [source, python] ---- resp = client.ilm.retry( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/193234bb5dc6451fd15b584fbefd2446.asciidoc000066400000000000000000000011731506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/role-templates.asciidoc:16 [source, python] ---- resp = client.security.put_role( name="example1", indices=[ { "names": [ "my-index-000001" ], "privileges": [ "read" ], "query": { "template": { "source": { "term": { "acl.username": "{{_user.username}}" } } } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/193704020a19714dec390452a4e75e8d.asciidoc000066400000000000000000000002601506101734100261670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:54 [source, python] ---- resp = client.indices.create( index="books", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/193d86b6cc34e12c2be806d27816a35c.asciidoc000066400000000000000000000011471506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:363 [source, python] ---- resp = client.search_application.search( 
name="my_search_application", params={ "size": 5, "query_string": "mountain climbing", "text_fields": [ { "name": "title", "boost": 10 }, { "name": "description", "boost": 2 }, { "name": "state", "boost": 1 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/194bbac15e709174ac85b681f3a3d137.asciidoc000066400000000000000000000011761506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template.asciidoc:195 [source, python] ---- resp = client.indices.put_index_template( name="template_1", index_patterns=[ "template*" ], template={ "settings": { "number_of_shards": 1 }, "aliases": { "alias1": {}, "alias2": { "filter": { "term": { "user.id": "kimchy" } }, "routing": "shard-1" }, "{index}-alias": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/196aed02b11def364bab84e455c1a073.asciidoc000066400000000000000000000004021506101734100265310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template.asciidoc:333 [source, python] ---- resp = client.indices.put_index_template( name="template_1", index_patterns=[ "logs-*" ], data_stream={}, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/199f5165d876267080046c907e93483f.asciidoc000066400000000000000000000003731506101734100260160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:153 [source, python] ---- resp = client.indices.analyze( index="my-index-000001", field="my-field", text="this is a test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/19c00c6b29bc7dbc5e92b3668da2da93.asciidoc000066400000000000000000000007331506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/simulate-ingest.asciidoc:279 [source, python] ---- resp = client.simulate.ingest( docs=[ { "_index": "my-index", "_id": "123", "_source": { "foo": "bar" } }, { "_index": "my-index", "_id": "456", "_source": { "foo": "rab" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/19ee488226d357d1576e7d3ae7a4693f.asciidoc000066400000000000000000000004041506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/keyword-analyzer.asciidoc:14 [source, python] ---- resp = client.indices.analyze( analyzer="keyword", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a1f3421717ff744ed83232729289bb0.asciidoc000066400000000000000000000002751506101734100262030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-delete.asciidoc:71 [source, python] ---- resp = client.slm.delete_lifecycle( policy_id="daily-snapshots", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a2890b90f3699fc2a4f27f94b145be9.asciidoc000066400000000000000000000010031506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:487 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="nightly-cluster-state-snapshots", schedule="0 30 2 * * ?", name="", repository="my_secure_repository", config={ "include_global_state": True, "indices": "-*" }, retention={ "expire_after": "30d", "min_count": 5, "max_count": 50 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a3897cfb4f974c09d0d847baac8aa6d.asciidoc000066400000000000000000000004441506101734100267300ustar00rootroot00000000000000// 
This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:196 [source, python] ---- resp = client.indices.stats( level="shards", human=True, expand_wildcards="all", filter_path="indices.*.total.indexing.index_total", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a3a4b8a4bfee4ab84ddd13d8835f560.asciidoc000066400000000000000000000003171506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/start-dfanalytics.asciidoc:88 [source, python] ---- resp = client.ml.start_data_frame_analytics( id="loganalytics", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a4f8beb6847678880ca113ee6fb75ca.asciidoc000066400000000000000000000005551506101734100265770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:362 [source, python] ---- resp = client.search( index="music", pretty=True, suggest={ "song-suggest": { "regex": "n[ever|i]r", "completion": { "field": "suggest" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a56df055b94466ca76818e0858752c6.asciidoc000066400000000000000000000006501506101734100262220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:97 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="openai_embeddings", inference_config={ "service": "openai", "service_settings": { "api_key": "<api_key>", "model_id": "text-embedding-ada-002" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a6dbe5df488c4a16e2f1101ba8a25d9.asciidoc000066400000000000000000000003571506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pattern-tokenizer.asciidoc:32 [source, python] ---- resp = client.indices.analyze( tokenizer="pattern", text="The foo_bar_size's default is 5.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a7483796087053ba55029d0dc2ab356.asciidoc000066400000000000000000000005341506101734100261730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/multivalued-fields.asciidoc:191 [source, python] ---- resp = client.index( index="mv", refresh=True, document={ "a": [ 2, None, 1 ] }, ) print(resp) resp1 = client.esql.query( query="FROM mv | LIMIT 1", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/1a81fe0186369838531e116e85aa4ccd.asciidoc000066400000000000000000000012131506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/filter-search-results.asciidoc:29 [source, python] ---- resp = client.indices.create( index="shirts", mappings={ "properties": { "brand": { "type": "keyword" }, "color": { "type": "keyword" }, "model": { "type": "keyword" } } }, ) print(resp) resp1 = client.index( index="shirts", id="1", refresh=True, document={ "brand": "gucci", "color": "red", "model": "slim" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/1a8d92e93481c432a91f7c213099800a.asciidoc000066400000000000000000000002531506101734100261750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-api-key.asciidoc:295 [source, python] ---- resp = client.security.query_api_keys() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a9e03ce0355872a7db27fedc783fbec.asciidoc000066400000000000000000000006731506101734100267300ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // inference/service-google-vertex-ai.asciidoc:151 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="google_vertex_ai_rerank", inference_config={ "service": "googlevertexai", "service_settings": { "service_account_json": "<service_account_json>", "project_id": "<project_id>" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1a9efb56adb2cd84faa9825a129381b9.asciidoc000066400000000000000000000005361506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-search.asciidoc:222 [source, python] ---- resp = client.rollup.rollup_search( index="sensor-1,sensor_rollup", size=0, aggregations={ "max_temperature": { "max": { "field": "temperature" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1aa91d3d48140d6367b6cabca8737b8f.asciidoc000066400000000000000000000013571506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/bulk.asciidoc:642 [source, python] ---- resp = client.bulk( operations=[ { "update": { "_id": "5", "_index": "index1" } }, { "doc": { "my_field": "foo" } }, { "update": { "_id": "6", "_index": "index1" } }, { "doc": { "my_field": "foo" } }, { "create": { "_id": "7", "_index": "index1" } }, { "my_field": "foo" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1aa96eeaf63fc967e166d1a2fcdccccc.asciidoc000066400000000000000000000015061506101734100272210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/subobjects.asciidoc:131 [source, python] ---- resp = client.indices.create( index="my-index-000002", mappings={ "properties": { "metrics": { "subobjects": False, "properties": { "time": { "type": "object", "properties": { "min": { "type": "long" }, "max": { "type": "long" } } } } } } }, ) print(resp) resp1 = client.indices.get_mapping( index="my-index-000002", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/1adee74383e5594e45c937177d75aa2a.asciidoc000066400000000000000000000004061506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/unsigned_long.asciidoc:93 [source, python] ---- resp = client.search( index="my_index", query={ "match_all": {} }, sort={ "my_counter": "desc" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b076ceb1ead9f6897c2f351f0e45f74.asciidoc000066400000000000000000000012611506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-api-keys.asciidoc:226 [source, python] ---- resp = client.security.create_api_key( name="my-restricted-api-key", role_descriptors={ "my-restricted-role-descriptor": { "indices": [ { "names": [ "my-search-app" ], "privileges": [ "read" ] } ], "restriction": { "workflows": [ "search_application_query" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b0b29e5cd7550c648d0892378e93804.asciidoc000066400000000000000000000003701506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-calendar-job.asciidoc:42 [source, python] ---- resp = client.ml.delete_calendar_job( calendar_id="planned-outages", job_id="total-requests", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b0dc9d076bbb58c6a2953ef4323d2fc.asciidoc000066400000000000000000000004331506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/ack-watch.asciidoc:196 [source, python] ---- resp = 
client.watcher.ack_watch( watch_id="my_watch", action_id="test_index", ) print(resp) resp1 = client.watcher.get_watch( id="my_watch", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/1b0f40959a7a4d124372f2bd3f7eac85.asciidoc000066400000000000000000000013431506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/fingerprint-tokenfilter.asciidoc:117 [source, python] ---- resp = client.indices.create( index="custom_fingerprint_example", settings={ "analysis": { "analyzer": { "whitespace_": { "tokenizer": "whitespace", "filter": [ "fingerprint_plus_concat" ] } }, "filter": { "fingerprint_plus_concat": { "type": "fingerprint", "max_output_size": 100, "separator": "+" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b2ab75d3c8064fac6ecc63104396c02.asciidoc000066400000000000000000000003621506101734100264570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/put-calendar-job.asciidoc:42 [source, python] ---- resp = client.ml.put_calendar_job( calendar_id="planned-outages", job_id="total-requests", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b3762712c14a19e8c2956b4f530d327.asciidoc000066400000000000000000000012731506101734100261750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/put-follow.asciidoc:114 [source, python] ---- resp = client.ccr.follow( index="follower_index", wait_for_active_shards="1", remote_cluster="remote_cluster", leader_index="leader_index", settings={ "index.number_of_replicas": 0 }, max_read_request_operation_count=1024, max_outstanding_read_requests=16, max_read_request_size="1024k", max_write_request_operation_count=32768, max_write_request_size="16k", max_outstanding_write_requests=8, max_write_buffer_count=512, max_write_buffer_size="512k", max_retry_delay="10s", read_poll_timeout="30s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b37e2237c9e3aaf84d56cc5c0bdb9ec.asciidoc000066400000000000000000000006731506101734100270020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/error-handling.asciidoc:19 [source, python] ---- resp = client.ilm.put_lifecycle( name="shrink-index", policy={ "phases": { "warm": { "min_age": "5d", "actions": { "shrink": { "number_of_shards": 4 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b47d988b218ee595430ec91eba91d80.asciidoc000066400000000000000000000007111506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/ignore-missing-component-templates.asciidoc:47 [source, python] ---- resp = client.indices.put_index_template( name="logs-foo", index_patterns=[ "logs-foo-*" ], data_stream={}, composed_of=[ "logs-foo_component1", "logs-foo_component2" ], ignore_missing_component_templates=[ "logs-foo_component2" ], priority=500, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/1b5c8d6e61930a308008b5b1ace2aa07.asciidoc000066400000000000000000000011631506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/properties.asciidoc:74 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "manager.name": "Alice White" } }, aggs={ "Employees": { "nested": { "path": "employees" }, "aggs": { "Employee Ages": { "histogram": { "field": "employees.age", "interval": 5 } } } } }, ) print(resp) ---- 
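// The following snippet is hand-written, not autogenerated: a minimal sketch of reading the buckets produced by the nested aggregation in the previous example. It assumes resp is the response of that search and that the aggregation names (Employees, Employee Ages) match the request above; histogram buckets expose key and doc_count. [source, python] ---- for bucket in resp["aggregations"]["Employees"]["Employee Ages"]["buckets"]: print(bucket["key"], bucket["doc_count"]) ----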
python-elasticsearch-9.1.1/docs/examples/1b98b60d8e558fcccf9c550bdbf5b5c9.asciidoc

// This file is autogenerated, DO NOT EDIT
// security/authorization/role-templates.asciidoc:75

[source, python]
----
resp = client.security.put_role(
    name="example3",
    indices=[
        {
            "names": ["my-index-000001"],
            "privileges": ["read"],
            "query": {
                "template": {
                    "source": "{ \"terms\": { \"group.statuses\": {{#toJson}}_user.metadata.statuses{{/toJson}} }}"
                }
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ba7afe23a26fe9ac7856d8c5bc1059d.asciidoc

// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:1502

[source, python]
----
resp = client.indices.create(
    index="romanian_example",
    settings={
        "analysis": {
            "filter": {
                "romanian_stop": {"type": "stop", "stopwords": "_romanian_"},
                "romanian_keywords": {"type": "keyword_marker", "keywords": ["exemplu"]},
                "romanian_stemmer": {"type": "stemmer", "language": "romanian"}
            },
            "analyzer": {
                "rebuilt_romanian": {
                    "tokenizer": "standard",
                    "filter": [
                        "lowercase",
                        "romanian_stop",
                        "romanian_keywords",
                        "romanian_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1bceb160ed2bcd51ee040caf21acf780.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-application-api.asciidoc:391

[source, python]
----
resp = client.search_application.put(
    name="my-search-app",
    search_application={
        "indices": ["index1"],
        "template": {
            "script": {
                "lang": "mustache",
                "source": "\n {\n \"retriever\": {\n \"rrf\": {\n \"retrievers\": [\n {{#text_fields}}\n {\n \"standard\": {\n \"query\": {\n \"match\": {\n \"{{.}}\": \"{{query_string}}\"\n }\n }\n }\n },\n {{/text_fields}}\n {{#elser_fields}}\n {\n \"standard\": {\n \"query\": {\n \"sparse_vector\": {\n \"field\": \"ml.inference.{{.}}_expanded.predicted_value\",\n \"inference_id\": \"\",\n \"query\": \"{{query_string}}\"\n }\n }\n }\n },\n {{/elser_fields}}\n ],\n \"rank_window_size\": {{rrf.rank_window_size}},\n \"rank_constant\": {{rrf.rank_constant}}\n }\n }\n }\n ",
                "params": {
                    "elser_fields": ["title", "meta_description"],
                    "text_fields": ["title", "meta_description"],
                    "query_string": "",
                    "rrf": {
                        "rank_window_size": 100,
                        "rank_constant": 60
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1c142bc8cac8d9dcb4f60e22902d434f.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/string-stats-aggregation.asciidoc:65

[source, python]
----
resp = client.search(
    index="my-index-000001",
    size="0",
    aggs={
        "message_stats": {
            "string_stats": {
                "field": "message.keyword",
                "show_distribution": True
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1c1f2a6a193d9e64c37242b2824b3031.asciidoc

// This file is autogenerated, DO NOT EDIT
// data-streams/tsds-reindex.asciidoc:44

[source, python]
----
resp = client.cluster.put_component_template(
    name="source_template",
    template={
        "settings": {
            "index": {
                "number_of_replicas": 2,
                "number_of_shards": 2,
                "mode": "time_series",
                "routing_path": ["metricset"]
            }
        },
        "mappings": {
            "properties": {
                "@timestamp": {"type": "date"},
                "metricset": {
                    "type": "keyword",
                    "time_series_dimension": True
                },
                "k8s": {
                    "properties": {
                        "tx": {"type": "long"},
                        "rx": {"type": "long"}
                    }
                }
            }
        }
    },
)
print(resp)

resp1 = client.indices.put_index_template(
    name="1",
    index_patterns=["k8s*"],
    composed_of=["source_template"],
    data_stream={},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/1c330f0fc9eac19d0edeb8c4017b9b93.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:67

[source, python]
----
resp = client.ingest.put_pipeline(
    id="hugging_face_embeddings_pipeline",
    processors=[
        {
            "inference": {
                "model_id": "hugging_face_embeddings",
                "input_output": {
                    "input_field": "content",
                    "output_field": "content_embedding"
                }
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1c3e3c4f2d268f1826a9b417e1868a58.asciidoc

// This file is autogenerated, DO NOT EDIT
// scripting/using.asciidoc:317

[source, python]
----
resp = client.update(
    index="my-index-000001",
    id="1",
    script={
        "source": "ctx._source.tags.add(params['tag'])",
        "lang": "painless",
        "params": {"tag": "blue"}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1c87b5bf682bc1e8809a657529e14b07.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/geo-shape-query.asciidoc:189

[source, python]
----
resp = client.indices.create(
    index="shapes",
    mappings={
        "properties": {
            "location": {"type": "geo_shape"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="shapes",
    id="deu",
    document={
        "location": {
            "type": "envelope",
            "coordinates": [[13, 53], [14, 52]]
        }
    },
)
print(resp1)

resp2 = client.search(
    index="example",
    query={
        "bool": {
            "filter": {
                "geo_shape": {
                    "location": {
                        "indexed_shape": {
                            "index": "shapes",
                            "id": "deu",
                            "path": "location"
                        }
                    }
                }
            }
        }
    },
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/1c8b6768c4eefc76fcb38708152f561b.asciidoc

// This file is autogenerated, DO NOT EDIT
// ml/df-analytics/apis/delete-dfanalytics.asciidoc:57

[source, python]
----
resp = client.ml.delete_data_frame_analytics(
    id="loganalytics",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1c9dac4183a3532c91dbd1a46907729b.asciidoc

// This file is autogenerated, DO NOT EDIT
// connector/docs/connectors-API-tutorial.asciidoc:459

[source, python]
----
resp = client.indices.delete(
    index="music",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cab9da122778a95061831265c250cc1.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/valuecount-aggregation.asciidoc:49

[source, python]
----
resp = client.search(
    index="sales",
    size=0,
    runtime_mappings={
        "tags": {
            "type": "keyword",
            "script": "\n emit(doc['type'].value);\n if (doc['promoted'].value) {\n emit('hot');\n }\n "
        }
    },
    aggs={
        "tags_count": {
            "value_count": {"field": "tags"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cadbcf2cfeb312f73b7f098291356ac.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/geo-shape.asciidoc:345

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": "MULTIPOINT (102.0 2.0, 103.0 2.0)"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cb3b45335ab1b9697c358104d44ea39.asciidoc

// This file is autogenerated, DO NOT EDIT
// security/using-ip-filtering.asciidoc:158

[source, python]
----
resp = client.cluster.put_settings(
    persistent={"xpack.security.transport.filter.enabled": False},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cbecd19be22979aefb45b4f160e77ea.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:171

[source, python]
----
resp = client.ingest.put_pipeline(
    id="google_vertex_ai_embeddings_pipeline",
    processors=[
        {
            "inference": {
                "model_id": "google_vertex_ai_embeddings",
                "input_output": {
                    "input_field": "content",
                    "output_field": "content_embedding"
                }
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cca4bb2f0ea7e43181be8bd965149d4.asciidoc

// This file is autogenerated, DO NOT EDIT
// eql/eql.asciidoc:1296

[source, python]
----
resp = client.eql.get(
    id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
    wait_for_completion_timeout="2s",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cd3b9d65576a9212eef898eb3105758.asciidoc

// This file is autogenerated, DO NOT EDIT
// setup/restart-cluster.asciidoc:35

[source, python]
----
resp = client.cluster.put_settings(
    persistent={"cluster.routing.allocation.enable": "primaries"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cea60c47d5c0e150b4c8fff4cd75ffe.asciidoc

// This file is autogenerated, DO NOT EDIT
// ingest/processors/script.asciidoc:112

[source, python]
----
resp = client.ingest.simulate(
    pipeline={
        "processors": [
            {
                "script": {
                    "description": "Set index based on `lang` field and `dataset` param",
                    "lang": "painless",
                    "source": "\n ctx['_index'] = ctx['lang'] + '-' + params['dataset'];\n ",
                    "params": {"dataset": "catalog"}
                }
            }
        ]
    },
    docs=[
        {
            "_index": "generic-index",
            "_source": {"lang": "fr"}
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ceaa211756e2db3d48c6bc4b1a861b0.asciidoc

// This file is autogenerated, DO NOT EDIT
// eql/eql.asciidoc:944

[source, python]
----
resp = client.eql.search(
    index="my-index*",
    max_samples_per_key=2,
    size=20,
    query="\n sample\n [any where uptime > 0] by host,os\n [any where port > 100] by host,op_sys\n [any where bool == true] by host,os\n ",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cecd4d87a92427175157d41859df2af.asciidoc

// This file is autogenerated, DO NOT EDIT
// cluster/allocation-explain.asciidoc:16

[source, python]
----
resp = client.cluster.allocation_explain(
    index="my-index-000001",
    shard=0,
    primary=False,
    current_node="my-node",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1cfa04e9654c1484e3d4c75bf439400a.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/shape.asciidoc:226

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": {
            "type": "polygon",
            "coordinates": [
                [[1000, -1001], [1001, -1001], [1001, -1000], [1000, -1000], [1000, -1001]],
                [[1000.2, -1001.2], [1000.8, -1001.2], [1000.8, -1001.8], [1000.2, -1001.8], [1000.2, -1001.2]]
            ]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1d252d9217c61c2c1cbe7a92f77b078f.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/security/query-api-key.asciidoc:613

[source, python]
----
resp = client.security.query_api_keys(
    size=0,
    query={
        "bool": {
            "must": {"term": {"invalidated": False}},
            "should": [
                {"range": {"expiration": {"gte": "now"}}},
                {"bool": {"must_not": {"exists": {"field": "expiration"}}}}
            ],
            "minimum_should_match": 1
        }
    },
    aggs={
        "keys_by_username": {
            "composite": {
                "sources": [
                    {"usernames": {"terms": {"field": "username"}}}
                ]
            },
            "aggs": {
                "expires_soon": {
                    "filter": {"range": {"expiration": {"lte": "now+30d/d"}}},
                    "aggs": {
                        "key_names": {"terms": {"field": "name"}}
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1d746272a7511bf91302a15b5c58ca0e.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/knn-search.asciidoc:707

[source, python]
----
resp = client.search(
    index="passage_vectors",
    fields=["full_text", "creation_time"],
    source=False,
    knn={
        "query_vector": [0.45, 45],
        "field": "paragraph.vector",
        "k": 2,
        "num_candidates": 2
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1d9b695a17cffd910c496c9b03c75d6f.asciidoc

// This file is autogenerated, DO NOT EDIT
// data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:34

[source, python]
----
resp = client.ilm.put_lifecycle(
    name="pre-dsl-ilm-policy",
    policy={
        "phases": {
            "hot": {
                "actions": {
                    "rollover": {"max_primary_shard_size": "50gb"}
                }
            },
            "delete": {
                "min_age": "7d",
                "actions": {"delete": {}}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1dadb7efe27b6c0c231eb6535e413bd9.asciidoc

// This file is autogenerated, DO NOT EDIT
// inference/service-azure-ai-studio.asciidoc:168

[source, python]
----
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="azure_ai_studio_embeddings",
    inference_config={
        "service": "azureaistudio",
        "service_settings": {
            "api_key": "",
            "target": "",
            "provider": "",
            "endpoint_type": ""
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1db086021e83205b6eab3b7765911cc2.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/parent-aggregation.asciidoc:16

[source, python]
----
resp = client.indices.create(
    index="parent_example",
    mappings={
        "properties": {
            "join": {
                "type": "join",
                "relations": {"question": "answer"}
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/1db715eb00832686ecddb6603684fc26.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/security/enroll-kibana.asciidoc:34

[source, python]
----
resp = client.security.enroll_kibana()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1dbb8cf17fbc45c87c7d2f75f15f9778.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/common-options.asciidoc:102

[source, python]
----
resp = client.cluster.state(
    filter_path="metadata.indices.*.stat*",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e08e054c761353f99211cd18e8ca47b.asciidoc

// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/delete-snapshot.asciidoc:49

[source, python]
----
resp = client.ml.delete_model_snapshot(
    job_id="farequote",
    snapshot_id="1491948163",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e0b85750d4e63ebbc927d4627c44bf8.asciidoc

// This file is autogenerated, DO NOT EDIT
// how-to/size-your-shards.asciidoc:604

[source, python]
----
resp = client.indices.forcemerge(
    index="my-index-000001",
    only_expunge_deletes=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e18a67caf8f06ff2710ec4a8b30f625.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/common-options.asciidoc:169

[source, python]
----
resp = client.cluster.state(
    filter_path="metadata.indices.*.state,-metadata.indices.logstash-*",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e26353d546d733634187b8c3a7837a7.asciidoc

// This file is autogenerated, DO NOT EDIT
// connector/apis/list-connectors-api.asciidoc:110

[source, python]
----
resp = client.connector.list(
    service_type="sharepoint_online",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e2c5cef7a3f254c71a33865eb4d7569.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/distance-feature-query.asciidoc:98

[source, python]
----
resp = client.search(
    index="items",
    query={
        "bool": {
            "must": {"match": {"name": "chocolate"}},
            "should": {
                "distance_feature": {
                    "field": "production_date",
                    "pivot": "7d",
                    "origin": "now"
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e3384bc255729b65a6f0fc8011ff733.asciidoc

// This file is autogenerated, DO NOT EDIT
// indices/segments.asciidoc:18

[source, python]
----
resp = client.indices.segments(
    index="my-index-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e3553a73da487017f7a95088b6aa957.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/security/clear-roles-cache.asciidoc:62

[source, python]
----
resp = client.security.clear_cached_roles(
    name="*",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e4b17b830ead15087ccd96151a5ebde.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/string-stats-aggregation.asciidoc:133

[source, python]
----
resp = client.search(
    index="my-index-000001",
    size=0,
    runtime_mappings={
        "message_and_context": {
            "type": "keyword",
            "script": "\n emit(doc['message.keyword'].value + ' ' + doc['context.keyword'].value)\n "
        }
    },
    aggs={
        "message_stats": {
            "string_stats": {"field": "message_and_context"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e547696f54582840040b1aa6661760c.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:400

[source, python]
----
resp = client.indices.rollover(
    alias="my-data-stream",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e871f060dbe1a5c316ed205278804a8.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/terms-aggregation.asciidoc:338

[source, python]
----
resp = client.search(
    aggs={
        "countries": {
            "terms": {
                "field": "artist.country",
                "order": {"rock>playback_stats.avg": "desc"}
            },
            "aggs": {
                "rock": {
                    "filter": {"term": {"genre": "rock"}},
                    "aggs": {
                        "playback_stats": {"stats": {"field": "play_count"}}
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e94a2bb95bc245bcfb87ac7d611cf49.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/percentile-aggregation.asciidoc:335

[source, python]
----
resp = client.search(
    index="latency",
    size=0,
    aggs={
        "load_time_outlier": {
            "percentiles": {
                "field": "load_time",
                "tdigest": {"execution_hint": "high_accuracy"}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1e9cab0b2727624e22e8cf4e7ca498ac.asciidoc

// This file is autogenerated, DO NOT EDIT
// watcher/example-watches/example-watch-clusterstatus.asciidoc:45

[source, python]
----
resp = client.cluster.health(
    pretty=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ea24f67fbbb6293d53caf2fe0c4b984.asciidoc

// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/simple-analyzer.asciidoc:15

[source, python]
----
resp = client.indices.analyze(
    analyzer="simple",
    text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ead35c954963e83f89872048dabdbe9.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/security/query-role.asciidoc:137

[source, python]
----
resp = client.security.query_role(
    query={
        "bool": {
            "must_not": {"term": {"metadata._reserved": True}}
        }
    },
    sort=["name"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1eb9c6ecb827ca69f7b17f7d2a26eae9.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-template.asciidoc:280

[source, python]
----
resp = client.render_search_template(
    source={
        "query": {
            "term": {"url.full": "{{#url}}{{host}}/{{page}}{{/url}}"}
        }
    },
    params={
        "host": "http://example.com",
        "page": "hello-world"
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/1ec66f188f681598cb5d7df700b214e3.asciidoc

// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc:365

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_custom_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["my_custom_keyword_marker_filter", "porter_stem"]
                }
            },
            "filter": {
                "my_custom_keyword_marker_filter": {
                    "type": "keyword_marker",
                    "keywords_path": "analysis/example_word_list.txt"
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ed26c7b445ab1c167bd9385e1f0066f.asciidoc

// This file is autogenerated, DO NOT EDIT
// sql/apis/delete-async-sql-search-api.asciidoc:18

[source, python]
----
resp = client.sql.delete_async(
    id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ed77bf308fa4ab328b36060e412f500.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/histogram-aggregation.asciidoc:334

[source, python]
----
resp = client.indices.create(
    index="metrics_index",
    mappings={
        "properties": {
            "network": {
                "properties": {
                    "name": {"type": "keyword"}
                }
            },
            "latency_histo": {"type": "histogram"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="metrics_index",
    id="1",
    refresh=True,
    document={
        "network.name": "net-1",
        "latency_histo": {
            "values": [1, 3, 8, 12, 15],
            "counts": [3, 7, 23, 12, 6]
        }
    },
)
print(resp1)

resp2 = client.index(
    index="metrics_index",
    id="2",
    refresh=True,
    document={
        "network.name": "net-2",
        "latency_histo": {
            "values": [1, 6, 8, 12, 14],
            "counts": [8, 17, 8, 7, 6]
        }
    },
)
print(resp2)

resp3 = client.search(
    index="metrics_index",
    size="0",
    aggs={
        "latency_buckets": {
            "histogram": {
                "field": "latency_histo",
                "interval": 5
            }
        }
    },
)
print(resp3)
----

python-elasticsearch-9.1.1/docs/examples/1eea46b08610972b79fdc4649748455d.asciidoc

// This file is autogenerated, DO NOT EDIT
// vectors/vector-functions.asciidoc:82

[source, python]
----
resp = client.search(
    index="my-index-000001",
    query={
        "script_score": {
            "query": {
                "bool": {
                    "filter": {"term": {"status": "published"}}
                }
            },
            "script": {
                "source": "cosineSimilarity(params.query_vector, 'my_dense_vector') + 1.0",
                "params": {
                    "query_vector": [4, 3.4, -0.2]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ef5119db55a6f2b6fc0ab92f36e7f8e.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/sort-search-results.asciidoc:63

[source, python]
----
resp = client.search(
    index="my-index-000001",
    sort=[
        {"post_date": {"format": "strict_date_optional_time_nanos"}}
    ],
    query={
        "term": {"user": "kimchy"}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1f00e73c144603e97f6c14ab15fa1913.asciidoc

// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:940

[source, python]
----
resp = client.indices.create(
    index="greek_example",
    settings={
        "analysis": {
            "filter": {
                "greek_stop": {"type": "stop", "stopwords": "_greek_"},
                "greek_lowercase": {"type": "lowercase", "language": "greek"},
                "greek_keywords": {"type": "keyword_marker", "keywords": ["παράδειγμα"]},
                "greek_stemmer": {"type": "stemmer", "language": "greek"}
            },
            "analyzer": {
                "rebuilt_greek": {
                    "tokenizer": "standard",
                    "filter": [
                        "greek_lowercase",
                        "greek_stop",
                        "greek_keywords",
                        "greek_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1f13c7caef9c2fe0f73fce8795bbc9b0.asciidoc

// This file is autogenerated, DO NOT EDIT
// analysis/testing.asciidoc:125

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "std_folded": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["lowercase", "asciifolding"]
                }
            }
        }
    },
    mappings={
        "properties": {
            "my_text": {
                "type": "text",
                "analyzer": "std_folded"
            }
        }
    },
)
print(resp)

resp1 = client.indices.analyze(
    index="my-index-000001",
    analyzer="std_folded",
    text="Is this déjà vu?",
)
print(resp1)

resp2 = client.indices.analyze(
    index="my-index-000001",
    field="my_text",
    text="Is this déjà vu?",
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/1f3dd84ab11bae09d3f99b1b3536e239.asciidoc

// This file is autogenerated, DO NOT EDIT
// snapshot-restore/apis/create-snapshot-api.asciidoc:31

[source, python]
----
resp = client.snapshot.create(
    repository="my_repository",
    snapshot="my_snapshot",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1f507659757e2844cefced25848540a0.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/geo-distance-query.asciidoc:187

[source, python]
----
resp = client.search(
    index="my_locations",
    query={
        "bool": {
            "must": {"match_all": {}},
            "filter": {
                "geo_distance": {
                    "distance": "12km",
                    "pin.location": [-70, 40]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1f673e1a0de2970dc648618d5425a994.asciidoc

// This file is autogenerated, DO NOT EDIT
// docs/reindex.asciidoc:273

[source, python]
----
resp = client.indices.refresh()
print(resp)

resp1 = client.search(
    index="my-new-index-000001",
    size="0",
    filter_path="hits.total",
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/1f6a190fa1aade1fb66680388f184ef9.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/validate.asciidoc:272

[source, python]
----
resp = client.indices.validate_query(
    index="my-index-000001",
    rewrite=True,
    all_shards=True,
    query={
        "match": {
            "user.id": {
                "query": "kimchy",
                "fuzziness": "auto"
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1f8a6d2cc57ed8997a52354aca371aac.asciidoc

// This file is autogenerated, DO NOT EDIT
// security/authentication/configuring-pki-realm.asciidoc:267

[source, python]
----
resp = client.security.put_role_mapping(
    name="direct_pki_only",
    roles=["role_for_pki1_direct"],
    rules={
        "all": [
            {"field": {"realm.name": "pki1"}},
            {"field": {"metadata.pki_delegated_by_user": None}}
        ]
    },
    enabled=True,
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/1f900f7178e80051e75d4fd04467cf49.asciidoc

// This file is autogenerated, DO NOT EDIT
// ingest/processors/pipeline.asciidoc:79

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    pipeline="pipelineB",
    document={"field": "value"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1fb2c77c0988bc6545040b20e3afa7e9.asciidoc

// This file is autogenerated, DO NOT EDIT
// connector/docs/dls-e2e-guide.asciidoc:139

[source, python]
----
resp = client.security.create_api_key(
    name="john-api-key",
    expiration="1d",
    role_descriptors={
        "sharepoint-online-role": {
            "index": [
                {
                    "names": ["sharepoint-search-application"],
                    "privileges": ["read"],
                    "query": {
                        "template": {
                            "params": {
                                "access_control": [
                                    "john@example.co",
                                    "Engineering Members"
                                ]
                            },
                            "source": "\n {\n \"bool\": {\n \"should\": [\n {\n \"bool\": {\n \"must_not\": {\n \"exists\": {\n \"field\": \"_allow_access_control\"\n }\n }\n }\n },\n {\n \"terms\": {\n \"_allow_access_control.enum\": {{#toJson}}access_control{{/toJson}}\n }\n }\n ]\n }\n }\n "
                        }
                    }
                }
            ],
            "restriction": {
                "workflows": ["search_application_query"]
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1fddbd602a6acf896a393cdb500a2831.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/rate-aggregation.asciidoc:310

[source, python]
----
resp = client.search(
    index="sales",
    size=0,
    aggs={
        "by_date": {
            "date_histogram": {
                "field": "date",
                "calendar_interval": "month"
            },
            "aggs": {
                "avg_number_of_sales_per_year": {
                    "rate": {
                        "field": "price",
                        "unit": "year",
                        "mode": "value_count"
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1fe2ed1d65c4774755de44c9b9d6ed67.asciidoc

// This file is autogenerated, DO NOT EDIT
// ingest.asciidoc:986

[source, python]
----
resp = client.nodes.stats(
    metric="ingest",
    filter_path="nodes.*.ingest",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ff12523efbd59c213c676937757c460.asciidoc

// This file is autogenerated, DO NOT EDIT
// rest-api/security/invalidate-api-keys.asciidoc:116

[source, python]
----
resp = client.security.invalidate_api_key(
    ids=["VuaCfGcBCdbkQm-e5aOx"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ff296e868635fd102239871a331331b.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/cardinality-aggregation.asciidoc:47

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    aggs={
        "type_count": {
            "cardinality": {
                "field": "type",
                "precision_threshold": 100
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/1ff9b263b7c3e83278bb6a776a51590a.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/histogram-aggregation.asciidoc:31

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    aggs={
        "prices": {
            "histogram": {
                "field": "price",
                "interval": 50
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/20005d8a6555b259b299d862cd218701.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/match-query.asciidoc:190

[source, python]
----
resp = client.search(
    query={
        "match": {
            "message": {
                "query": "this is a test",
                "operator": "and"
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2006f577a113bda40905cf7b405bf1cf.asciidoc

// This file is autogenerated, DO NOT EDIT
// ingest.asciidoc:816

[source, python]
----
resp = client.ingest.put_pipeline(
    id="my-pipeline",
    processors=[
        {
            "set": {
                "description": "If 'url.scheme' is 'http', set 'url.insecure' to true",
                "if": "ctx.url?.scheme =~ /^http[^s]/",
                "field": "url.insecure",
                "value": True
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2009f2d1ba0780a799a0fdce889c9739.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/knn-search.asciidoc:695

[source, python]
----
resp = client.bulk(
    index="passage_vectors",
    refresh=True,
    operations=[
        {"index": {"_id": "1"}},
        {
            "full_text": "first paragraph another paragraph",
            "creation_time": "2019-05-04",
            "paragraph": [
                {
                    "vector": [0.45, 45],
                    "text": "first paragraph",
                    "paragraph_id": "1"
                },
                {
                    "vector": [0.8, 0.6],
                    "text": "another paragraph",
                    "paragraph_id": "2"
                }
            ]
        },
        {"index": {"_id": "2"}},
        {
            "full_text": "number one paragraph number two paragraph",
            "creation_time": "2020-05-04",
            "paragraph": [
                {
                    "vector": [1.2, 4.5],
                    "text": "number one paragraph",
                    "paragraph_id": "1"
                },
                {
                    "vector": [-1, 42],
                    "text": "number two paragraph",
                    "paragraph_id": "2"
                }
            ]
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/200f6d4cc7b9c300b8962a119e03873f.asciidoc

// This file is autogenerated, DO NOT EDIT
// indices/get-data-stream.asciidoc:286

[source, python]
----
resp = client.indices.get_data_stream(
    name="my-data-stream*",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20162e1dac807a7604f58dad814d1bc5.asciidoc

// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/hunspell-tokenfilter.asciidoc:199

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "en": {
                    "tokenizer": "standard",
                    "filter": ["my_en_US_dict_stemmer"]
                }
            },
            "filter": {
                "my_en_US_dict_stemmer": {
                    "type": "hunspell",
                    "locale": "en_US",
                    "dedup": False
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20179a8889e949d6a8ee5fbf2ba35c96.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-search.asciidoc:408

[source, python]
----
resp = client.search(
    index="google-vertex-ai-embeddings",
    knn={
        "field": "content_embedding",
        "query_vector_builder": {
            "text_embedding": {
                "model_id": "google_vertex_ai_embeddings",
                "model_text": "Calculate fuel cost"
            }
        },
        "k": 10,
        "num_candidates": 100
    },
    source=["id", "content"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/203c3bb334384bdfb11ff1101ccfba25.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/suggesters/phrase-suggest.asciidoc:290

[source, python]
----
resp = client.search(
    index="test",
    suggest={
        "text": "obel prize",
        "simple_phrase": {
            "phrase": {
                "field": "title.trigram",
                "size": 1,
                "smoothing": {
                    "laplace": {"alpha": 0.7}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20407c847adb8393ce41dc656384afc4.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/knn-search.asciidoc:787

[source, python]
----
resp = client.search(
    index="passage_vectors",
    fields=["creation_time", "full_text"],
    source=False,
    knn={
        "query_vector": [0.45, 45],
        "field": "paragraph.vector",
        "k": 2,
        "num_candidates": 2,
        "filter": {
            "bool": {
                "filter": [
                    {
                        "range": {
                            "creation_time": {
                                "gte": "2019-05-01",
                                "lte": "2019-05-05"
                            }
                        }
                    }
                ]
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2051ffe025550ab6645bfd525eaed3c4.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/geo-bounding-box-query.asciidoc:246

[source, python]
----
resp = client.search(
    index="my_locations",
    query={
        "bool": {
            "must": {"match_all": {}},
            "filter": {
                "geo_bounding_box": {
                    "pin.location": {
                        "top_left": "POINT (-74.1 40.73)",
                        "bottom_right": "POINT (-71.12 40.01)"
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2063713516847eef5d1dbf4ca1e877b0.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/geohexgrid-aggregation.asciidoc:29

[source, python]
----
resp = client.indices.create(
    index="museums",
    mappings={
        "properties": {
            "location": {"type": "geo_point"}
        }
    },
)
print(resp)

resp1 = client.bulk(
    index="museums",
    refresh=True,
    operations=[
        {"index": {"_id": 1}},
        {"location": "POINT (4.912350 52.374081)", "name": "NEMO Science Museum"},
        {"index": {"_id": 2}},
        {"location": "POINT (4.901618 52.369219)", "name": "Museum Het Rembrandthuis"},
        {"index": {"_id": 3}},
        {"location": "POINT (4.914722 52.371667)", "name": "Nederlands Scheepvaartmuseum"},
        {"index": {"_id": 4}},
        {"location": "POINT (4.405200 51.222900)", "name": "Letterenhuis"},
        {"index": {"_id": 5}},
        {"location": "POINT (2.336389 48.861111)", "name": "Musée du Louvre"},
        {"index": {"_id": 6}},
        {"location": "POINT (2.327000 48.860000)", "name": "Musée d'Orsay"}
    ],
)
print(resp1)

resp2 = client.search(
    index="museums",
    size="0",
    aggregations={
        "large-grid": {
            "geohex_grid": {
                "field": "location",
                "precision": 4
            }
        }
    },
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/206c723296be8ef8d58aef3ee01f5ba2.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/pipeline.asciidoc:176

[source, python]
----
resp = client.search(
    aggs={
        "my_date_histo": {
            "date_histogram": {
                "field": "timestamp",
                "calendar_interval": "day"
            },
            "aggs": {
                "the_deriv": {
                    "derivative": {"buckets_path": "_count"}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/206d57bf0cb022c8229894e7753eca83.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/geo-shape-query.asciidoc:58

[source, python]
----
resp = client.search(
    index="example",
    query={
        "bool": {
            "must": {"match_all": {}},
            "filter": {
                "geo_shape": {
                    "location": {
                        "shape": {
                            "type": "envelope",
                            "coordinates": [[13, 53], [14, 52]]
                        },
                        "relation": "within"
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2081739da0c69de8af6f5bf9e94433e6.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/geo-shape.asciidoc:376

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": "MULTILINESTRING ((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0), (100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8))"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/208c2b41bd1659aae8f02fa3e3b7378a.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/params/copy-to.asciidoc:15

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "first_name": {"type": "text", "copy_to": "full_name"},
            "last_name": {"type": "text", "copy_to": "full_name"},
            "full_name": {"type": "text"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={
        "first_name": "John",
        "last_name": "Smith"
    },
)
print(resp1)

resp2 = client.search(
    index="my-index-000001",
    query={
        "match": {
            "full_name": {
                "query": "John Smith",
                "operator": "and"
            }
        }
    },
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/209a9190082498f0b7daa26f8834846b.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/params/norms.asciidoc:21

[source, python]
----
resp = client.indices.put_mapping(
    index="my-index-000001",
    properties={
        "title": {"type": "text", "norms": False}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20bc71cc5bbe04184e27827f3777a406.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:723

[source, python]
----
resp = client.search(
    index="my-index-000001",
    fields=["@timestamp", "day_of_week"],
    source=False,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20c595907b4afbf26bd60e816a6ddf6a.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-application-api.asciidoc:275

[source, python]
----
resp = client.search_application.search(
    name="my_search_application",
    params={"query_string": "kayaking"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20e3b181114e00c943a27a9bbcf85f15.asciidoc

// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/get-record.asciidoc:286

[source, python]
----
resp = client.ml.get_records(
    job_id="low_request_rate",
    sort="record_score",
    desc=True,
    start="1454944100000",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/20f62d0540bf6261549bd286416eae28.asciidoc

// This file is autogenerated, DO NOT EDIT
// ingest/apis/enrich/put-enrich-policy.asciidoc:30

[source, python]
----
resp = client.enrich.put_policy(
    name="my-policy",
    match={
        "indices": "users",
        "match_field": "email",
        "enrich_fields": ["first_name", "last_name", "city", "zip", "state"]
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/2105f2d1d81977054a93163a175793ce.asciidoc

// This file is autogenerated, DO NOT EDIT
// snapshot-restore/apis/get-snapshot-status-api.asciidoc:81

[source, python]
----
resp = client.snapshot.status()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2155c920d7d860f3ee7542f2211b4fec.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/text-expansion-query.asciidoc:25

[source, python]
----
resp = client.search(
    query={
        "text_expansion": {
            "": {
                "model_id": "the model to produce the token weights",
                "model_text": "the query string"
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21565b72da426776e445b1a166f6e104.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/has-child-query.asciidoc:31

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "my-join-field": {
                "type": "join",
                "relations": {"parent": "child"}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/216848930c2d344fe0bed0daa70c35b9.asciidoc

// This file is autogenerated, DO NOT EDIT
// docs/delete-by-query.asciidoc:620

[source, python]
----
resp = client.tasks.list(
    detailed=True,
    actions="*/delete/byquery",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/216a6573ab4ab023e5dcac4eaa08c3c8.asciidoc

// This file is autogenerated, DO NOT EDIT
// snapshot-restore/register-repository.asciidoc:185

[source, python]
----
resp = client.snapshot.verify_repository(
    name="my_unverified_backup",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/216e24f05cbb82c1718713fbab8623d2.asciidoc

// This file is autogenerated, DO NOT EDIT
// ingest/processors/geoip.asciidoc:136

[source, python]
----
resp = client.ingest.put_pipeline(
    id="geoip",
    description="Add ip geolocation info",
    processors=[
        {
            "geoip": {
                "field": "ip",
                "target_field": "geo",
                "database_file": "GeoLite2-Country.mmdb"
            }
        }
    ],
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="my_id",
    pipeline="geoip",
    document={"ip": "89.160.20.128"},
)
print(resp1)

resp2 = client.get(
    index="my-index-000001",
    id="my_id",
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/21715c32c140feeab04b38ff6d6de111.asciidoc

// This file is autogenerated, DO NOT EDIT
// troubleshooting/troubleshooting-searches.asciidoc:143

[source, python]
----
resp = client.indices.get_mapping(
    index="my-index-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2185c9dfc62a59313df1702ec1c3513e.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/percentile-aggregation.asciidoc:88

[source, python]
----
resp = client.search(
    index="latency",
    size=0,
    aggs={
        "load_time_outlier": {
            "percentiles": {
                "field": "load_time",
                "percents": [95, 99, 99.9]
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/218b9009f120e8ad33f710e019179562.asciidoc

// This file is autogenerated, DO NOT EDIT
// snapshot-restore/apis/get-repo-api.asciidoc:125

[source, python]
----
resp = client.snapshot.get_repository(
    name="my_repository",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21a226d91d8edd209f6a821064e83918.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/global-aggregation.asciidoc:18

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    query={
        "match": {"type": "t-shirt"}
    },
    aggs={
        "all_products": {
            "global": {},
            "aggs": {
                "avg_price": {"avg": {"field": "price"}}
            }
        },
        "t_shirts": {"avg": {"field": "price"}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21bb03ca9123de3237c1c76934f9f172.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/filters-aggregation.asciidoc:138

[source, python]
----
resp = client.index(
    index="logs",
    id="4",
    refresh=True,
    document={"body": "info: user Bob logged out"},
)
print(resp)

resp1 = client.search(
    index="logs",
    size=0,
    aggs={
        "messages": {
            "filters": {
                "other_bucket_key": "other_messages",
                "filters": {
                    "errors": {"match": {"body": "error"}},
                    "warnings": {"match": {"body": "warning"}}
                }
            }
        }
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/21c1e6ee886140ce0cd67184dd19b981.asciidoc

// This file is autogenerated, DO NOT EDIT
// indices/dangling-indices-list.asciidoc:19

[source, python]
----
resp = client.dangling_indices.list_dangling_indices()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21cd01cb90d3ea1acd0ab22d7edd2c88.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-task.asciidoc:162

[source, python]
----
resp = client.inference.put(
    task_type="text_embedding",
    inference_id="azure_ai_studio_embeddings",
    inference_config={
        "service": "azureaistudio",
        "service_settings": {
            "api_key": "",
            "target": "",
            "provider": "",
            "endpoint_type": ""
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21d0ab6e420bfe7a1639db6af5b2e9c0.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/median-absolute-deviation-aggregation.asciidoc:116

[source, python]
----
resp = client.search(
    index="reviews",
    filter_path="aggregations",
    size=0,
    runtime_mappings={
        "rating.out_of_ten": {
            "type": "long",
            "script": {
                "source": "emit(doc['rating'].value * params.scaleFactor)",
                "params": {"scaleFactor": 2}
            }
        }
    },
    aggs={
        "review_average": {
            "avg": {"field": "rating.out_of_ten"}
        },
        "review_variability": {
            "median_absolute_deviation": {"field": "rating.out_of_ten"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21d41e8cbd107fbdf0901f885834dafc.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/wildcard.asciidoc:139

[source, python]
----
resp = client.indices.create(
    index="idx",
    settings={
        "index": {
            "mapping": {
                "source": {"mode": "synthetic"}
            }
        }
    },
    mappings={
        "properties": {
            "card": {"type": "wildcard"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="idx",
    id="1",
    document={
        "card": ["king", "ace", "ace", "jack"]
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/21d5fe55ca32b10b118224ea1a8a2e04.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/pipeline/bucket-count-ks-test-aggregation.asciidoc:81

[source, python]
----
resp = client.search(
    index="correlate_latency",
    size="0",
    filter_path="aggregations",
    aggs={
        "buckets": {
            "terms": {
                "field": "version",
                "size": 2
            },
            "aggs": {
                "latency_ranges": {
                    "range": {
                        "field": "latency",
                        "ranges": [
                            {"to": 0},
                            {"from": 0, "to": 105},
                            {"from": 105, "to": 225},
                            {"from": 225, "to": 445},
                            {"from": 445, "to": 665},
                            {"from": 665, "to": 885},
                            {"from": 885, "to": 1115},
                            {"from": 1115, "to": 1335},
                            {"from": 1335, "to": 1555},
                            {"from": 1555, "to": 1775},
                            {"from": 1775}
                        ]
                    }
                },
                "ks_test": {
                    "bucket_count_ks_test": {
                        "buckets_path": "latency_ranges>_count",
                        "alternative": ["less", "greater", "two_sided"]
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/21e95d29bc37deb5689a654aa323b4ba.asciidoc

// This file is autogenerated, DO NOT EDIT
// security/authentication/configuring-ldap-realm.asciidoc:138

[source, python]
----
resp = client.security.put_role_mapping(
    name="admins",
    roles=["monitoring", "user"],
    rules={
        "field": {"groups": "cn=admins,dc=example,dc=com"}
    },
    enabled=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/221e9b14567f950008459af77757750e.asciidoc

// This file is autogenerated, DO NOT EDIT
// watcher/example-watches/example-watch-clusterstatus.asciidoc:54

[source, python]
----
resp = client.watcher.put_watch(
    id="cluster_health_watch",
    trigger={
        "schedule": {"interval": "10s"}
    },
    input={
        "http": {
            "request": {
                "host": "localhost",
                "port": 9200,
                "path": "/_cluster/health"
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2224143c45dfc83a2d10b98cd4f94bb5.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/nested-query.asciidoc:415

[source, python]
----
resp = client.search(
    index="my-index",
    query={
        "bool": {
            "must_not": [
                {
                    "nested": {
                        "path": "comments",
                        "query": {
                            "term": {"comments.author": "nik9000"}
                        }
                    }
                }
            ]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/222e49c924ca8bac7b41bc952a39261c.asciidoc

// This file is autogenerated, DO NOT EDIT
// query-dsl/semantic-query.asciidoc:55

[source, python]
----
resp = client.search(
    index="my-index",
    size=3,
    query={
        "bool": {
            "should": [
                {
                    "match": {
                        "title": {
                            "query": "mountain lake",
                            "boost": 1
                        }
                    }
                },
                {
                    "semantic": {
                        "field": "title_semantic",
                        "query": "mountain lake",
                        "boost": 2
                    }
                }
            ]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22334f4b24bb8977d3e1bf2ffdc29d3f.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/sort-search-results.asciidoc:315

[source, python]
----
resp = client.search(
    query={
        "nested": {
            "path": "parent",
            "query": {
                "bool": {
                    "must": {
                        "range": {"parent.age": {"gte": 21}}
                    },
                    "filter": {
                        "nested": {
                            "path": "parent.child",
                            "query": {
                                "match": {"parent.child.name": "matt"}
                            }
                        }
                    }
                }
            }
        }
    },
    sort=[
        {
            "parent.child.age": {
                "mode": "min",
                "order": "asc",
                "nested": {
                    "path": "parent",
                    "filter": {
                        "range": {"parent.age": {"gte": 21}}
                    },
                    "nested": {
                        "path": "parent.child",
                        "filter": {
                            "match": {"parent.child.name": "matt"}
                        }
                    }
                }
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2238ac4170275f6cfc2af49c3f014cbc.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/extendedstats-aggregation.asciidoc:108

[source, python]
----
resp = client.search(
    index="exams",
    size=0,
    runtime_mappings={
        "grade.corrected": {
            "type": "double",
            "script": {
                "source": "emit(Math.min(100, doc['grade'].value * params.correction))",
                "params": {"correction": 1.2}
            }
        }
    },
    aggs={
        "grades_stats": {
            "extended_stats": {"field": "grade.corrected"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22619a4111f66e1b7231693b8f8d069a.asciidoc

// This file is autogenerated, DO NOT EDIT
// watcher/managing-watches.asciidoc:30

[source, python]
----
resp = client.watcher.query_watches(
    size=100,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22882d4eb8b99f44c8e0d3a2c893fc4b.asciidoc

// This file is autogenerated, DO NOT EDIT
// search/search-your-data/retrieve-selected-fields.asciidoc:408

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "my-small": {
                "type": "keyword",
                "ignore_above": 2
            },
            "my-large": {"type": "keyword"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    refresh=True,
    document={
        "my-small": ["ok", "bad"],
        "my-large": "ok content"
    },
)
print(resp1)

resp2 = client.search(
    index="my-index-000001",
    fields=["my-*"],
    source=False,
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/229b83cbcd8efa1b0288a728a2abacb4.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/point.asciidoc:21

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "location": {"type": "point"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={
        "text": "Point as an object using GeoJSON format",
        "location": {
            "type": "Point",
            "coordinates": [-71.34, 41.12]
        }
    },
)
print(resp1)

resp2 = client.index(
    index="my-index-000001",
    id="2",
    document={
        "text": "Point as a WKT POINT primitive",
        "location": "POINT (-71.34 41.12)"
    },
)
print(resp2)

resp3 = client.index(
    index="my-index-000001",
    id="3",
    document={
        "text": "Point as an object with 'x' and 'y' keys",
        "location": {"x": -71.34, "y": 41.12}
    },
)
print(resp3)

resp4 = client.index(
    index="my-index-000001",
    id="4",
    document={
        "text": "Point as an array",
        "location": [-71.34, 41.12]
    },
)
print(resp4)

resp5 = client.index(
    index="my-index-000001",
    id="5",
    document={
        "text": "Point as a string",
        "location": "-71.34,41.12"
    },
)
print(resp5)
----

python-elasticsearch-9.1.1/docs/examples/22b176a184517cf1b5801f5eb4f17f97.asciidoc

// This file is autogenerated, DO NOT EDIT
// data-streams/downsampling-dsl.asciidoc:349

[source, python]
----
resp = client.indices.rollover(
    alias="datastream",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22cb99d4e6ba3101a2d9f59764a90877.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/geo-shape.asciidoc:177

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": "POINT (-77.03653 38.897676)"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22d8e92b4100f8e4f52260ef8d3aa2b2.asciidoc

// This file is autogenerated, DO NOT EDIT
// mapping/types/binary.asciidoc:12

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "name": {"type": "text"},
            "blob": {"type": "binary"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={
        "name": "Some binary blob",
        "blob": "U29tZSBiaW5hcnkgYmxvYg=="
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/22dd833336fa22c8a8f67bb754ffba9a.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/inference-api/infer-api-search.asciidoc:278

[source, python]
----
resp = client.search(
    index="azure-openai-embeddings",
    knn={
        "field": "content_embedding",
        "query_vector_builder": {
            "text_embedding": {
                "model_id": "azure_openai_embeddings",
                "model_text": "Calculate fuel cost"
            }
        },
        "k": 10,
        "num_candidates": 100
    },
    source=["id", "content"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22dde5fe7ac5d85d52115641a68b3c55.asciidoc

// This file is autogenerated, DO NOT EDIT
// indices/analyze.asciidoc:202

[source, python]
----
resp = client.indices.analyze(
    tokenizer="whitespace",
    filter=[
        "lowercase",
        {
            "type": "stop",
            "stopwords": ["a", "is", "this"]
        }
    ],
    text="this is a test",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/22ef90a7fb057728d2115f0c6f551819.asciidoc

// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/range-aggregation.asciidoc:250

[source, python]
----
resp = client.search(
    index="sales",
    aggs={
        "price_ranges": {
            "range": {
                "field": "price",
                "ranges": [
                    {"to": 100},
                    {"from": 100, "to": 200},
                    {"from": 200}
                ]
            },
            "aggs": {
                "price_stats": {
                    "stats": {"field": "price"}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/23074748d6c978176df5b04265e88938.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/troubleshooting/data/increase-cluster-shard-limit.asciidoc:109

[source, python]
----
resp = client.indices.get_settings(
    index="my-index-000001",
    name="index.routing.allocation.include._tier_preference",
    flat_settings=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/2308c9948cbebd2092eec03b11281005.asciidoc

// This file is autogenerated, DO NOT EDIT
// tab-widgets/register-fs-repo.asciidoc:93

[source, python]
----
resp = client.snapshot.create_repository(
    name="my_fs_backup",
    repository={
        "type": "fs",
        "settings": {
            "location": "E:\\Mount\\Backups\\My_fs_backup_location"
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/2310d84ebf113f2a3ed14cc53172ae4a.asciidoc000066400000000000000000000005771506101734100265340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/text-expansion-query.asciidoc:100 [source, python] ---- resp = client.search( index="my-index", query={ "text_expansion": { "ml.tokens": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2342a56279106ea643026df657bf7f88.asciidoc000066400000000000000000000010171506101734100262110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/similarity.asciidoc:24 [source, python] ---- resp = client.indices.create( index="index", settings={ "index": { "similarity": { "my_similarity": { "type": "DFR", "basic_model": "g", "after_effect": "l", "normalization": "h2", "normalization.h2.c": "3.0" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/234cec3ead32d7ed71afbe1edfea23df.asciidoc000066400000000000000000000007471506101734100272730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/has-parent-query.asciidoc:122 [source, python] ---- resp = client.search( query={ "has_parent": { "parent_type": "parent", "score": True, "query": { "function_score": { "script_score": { "script": "_score * doc['view_count'].value" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/236f50d89a07b83119af72e367e685da.asciidoc000066400000000000000000000010151506101734100263500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:298 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_size": "50gb", "max_age": "30d", "min_primary_shard_size": "1gb" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/239f615e0009c5cb1dc4e82ec4c0dab5.asciidoc000066400000000000000000000012731506101734100266220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/example-watches/example-watch-clusterstatus.asciidoc:76 [source, python] ---- resp = client.watcher.put_watch( id="cluster_health_watch", trigger={ "schedule": { "interval": "10s" } }, input={ "http": { "request": { "host": "localhost", "port": 9200, "path": "/_cluster/health", "auth": { "basic": { "username": "elastic", "password": "x-pack-test-password" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/23b062c157235246d7c347b9047b2435.asciidoc000066400000000000000000000005651506101734100260330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:119 [source, python] ---- resp = client.security.put_role_mapping( name="mapping1", roles=[ "user" ], enabled=True, rules={ "field": { "username": "*" } }, metadata={ "version": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/23c4ae62f7035f2796e0ac3c7c4c20a9.asciidoc000066400000000000000000000007031506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-migrate.asciidoc:57 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "migrate": {}, "allocate": { "number_of_replicas": 1 } } } } }, ) print(resp) ---- 
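// Hand-written companion sketch (not from the generated set): once a policy
// like "my_policy" above is attached to an index, the ILM explain API reports
// the phase and action the index is currently executing. The index name here
// is only a placeholder.

[source, python]
----
resp = client.ilm.explain_lifecycle(
    index="my-index-000001",
)
print(resp)
----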
python-elasticsearch-9.1.1/docs/examples/2408020186af569a76a30eccadaed0d5.asciidoc000066400000000000000000000016131506101734100265360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/script.asciidoc:48 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "script": { "description": "Extract 'tags' from 'env' field", "lang": "painless", "source": "\n String[] envSplit = ctx['env'].splitOnToken(params['delimiter']);\n ArrayList tags = new ArrayList();\n tags.add(envSplit[params['position']].trim());\n ctx['tags'] = tags;\n ", "params": { "delimiter": "-", "position": 1 } } } ] }, docs=[ { "_source": { "env": "es01-prod" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24275847128b68da6e14233aa1259fb9.asciidoc000066400000000000000000000017701506101734100262060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:93 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": "GET /search" } }, collapse={ "field": "user.id", "inner_hits": [ { "name": "largest_responses", "size": 3, "sort": [ { "http.response.bytes": { "order": "desc" } } ] }, { "name": "most_recent", "size": 3, "sort": [ { "@timestamp": { "order": "desc" } } ] } ] }, sort=[ "http.response.bytes" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/242a26ced0e5706e48dcda19a4003094.asciidoc000066400000000000000000000007461506101734100264050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:970 [source, python] ---- resp = client.reindex( source={ "remote": { "host": "http://otherhost:9200", "username": "user", "password": "pass" }, "index": "my-index-000001", "query": { "match": { "test": "data" } } }, dest={ "index": "my-new-index-000001" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/246763219ec06172f7aa57bba28d344a.asciidoc000066400000000000000000000052101506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/rank-vectors.asciidoc:159 [source, python] ---- resp = client.indices.create( index="my-rank-vectors-bit", mappings={ "properties": { "my_vector": { "type": "rank_vectors", "element_type": "bit" } } }, ) print(resp) resp1 = client.bulk( index="my-rank-vectors-bit", refresh=True, operations=[ { "index": { "_id": "1" } }, { "my_vector": [ 127, -127, 0, 1, 42 ] }, { "index": { "_id": "2" } }, { "my_vector": "8100012a7f" } ], ) print(resp1) resp2 = client.search( index="my-rank-vectors-bit", query={ "script_score": { "query": { "match_all": {} }, "script": { "source": "maxSimDotProduct(params.query_vector, 'my_vector')", "params": { "query_vector": [ [ 0.35, 0.77, 0.95, 0.15, 0.11, 0.08, 0.58, 0.06, 0.44, 0.52, 0.21, 0.62, 0.65, 0.16, 0.64, 0.39, 0.93, 0.06, 0.93, 0.31, 0.92, 0, 0.66, 0.86, 0.92, 0.03, 0.81, 0.31, 0.2, 0.92, 0.95, 0.64, 0.19, 0.26, 0.77, 0.64, 0.78, 0.32, 0.97, 0.84 ] ] } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/2493c25e1ef944bc4de0f726470bcdec.asciidoc000066400000000000000000000012571506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/frequent-item-sets-aggregation.asciidoc:144 [source, python] ---- resp = client.async_search.submit( index="kibana_sample_data_ecommerce", size=0, aggs={ "my_agg": { "frequent_item_sets": { "minimum_set_size": 3, "fields": [ { "field": "category.keyword" }, { "field": 
"geoip.city_name", "exclude": "other" } ], "size": 3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/249bf48252c8cea47ef872541c8a884c.asciidoc000066400000000000000000000027031506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/grant-api-keys.asciidoc:133 [source, python] ---- resp = client.security.grant_api_key( grant_type="password", username="test_admin", password="x-pack-test-password", api_key={ "name": "my-api-key", "expiration": "1d", "role_descriptors": { "role-a": { "cluster": [ "all" ], "indices": [ { "names": [ "index-a*" ], "privileges": [ "read" ] } ] }, "role-b": { "cluster": [ "all" ], "indices": [ { "names": [ "index-b*" ], "privileges": [ "all" ] } ] } }, "metadata": { "application": "my-application", "environment": { "level": 1, "trusted": True, "tags": [ "dev", "staging" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24a037008e0fc2550ecb6a5d36c04a93.asciidoc000066400000000000000000000010421506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:816 [source, python] ---- resp = client.search( index="sales", size="0", runtime_mappings={ "date.day_of_week": { "type": "keyword", "script": "emit(doc['date'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" } }, aggs={ "day_of_week": { "terms": { "field": "date.day_of_week" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24ad3c234f69f55a3fbe2d488e70178a.asciidoc000066400000000000000000000012131506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:360 [source, python] ---- resp = client.ml.evaluate_data_frame( index="student_performance_mathematics_reg", query={ "term": { "ml.is_training": { "value": True } } }, evaluation={ "regression": { "actual_field": "G3", "predicted_field": "ml.G3_prediction", "metrics": { "r_squared": {}, "mse": {}, "msle": {}, "huber": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24aee6033bf77a68ced74e3fd9d34283.asciidoc000066400000000000000000000003101506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template-v1.asciidoc:85 [source, python] ---- resp = client.indices.get_template( name="template_1,template_2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24bdccb07bba7e7e6ff45d3d4cd83064.asciidoc000066400000000000000000000006311506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:250 [source, python] ---- resp = client.update_by_query( index="my-data-stream", pipeline="my-pipeline", ) print(resp) resp1 = client.reindex( source={ "index": "my-data-stream" }, dest={ "index": "my-new-data-stream", "op_type": "create", "pipeline": "my-pipeline" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/24d66b2ebdf662d8b03e17214e65c825.asciidoc000066400000000000000000000003661506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:375 [source, python] ---- resp = client.cluster.put_settings( persistent={ "xpack.profiling.templates.enabled": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24d806d1803158dacd4dda73c4204d3e.asciidoc000066400000000000000000000003361506101734100264620ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // query-rules/apis/get-query-rule.asciidoc:111 [source, python] ---- resp = client.query_rules.get_rule( ruleset_id="my-ruleset", rule_id="my-rule1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/24f4dfdf9922d5aa79151675b7767742.asciidoc000066400000000000000000000004641506101734100263120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:385 [source, python] ---- resp = client.search( index="my-index-000001", scroll="1m", size=100, query={ "match": { "message": "foo" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/253140cb1e270e5ee23e15dbaeaaa0ea.asciidoc000066400000000000000000000003071506101734100267320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:29 [source, python] ---- resp = client.indices.data_streams_stats( human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25576b6773322f0929d4c635a940dba0.asciidoc000066400000000000000000000006401506101734100261770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:530 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "title", "content" ], "query": "this OR that OR thus", "type": "cross_fields", "minimum_should_match": 2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/256eba7a77c8890a43afeda8ce8a3225.asciidoc000066400000000000000000000010761506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/generate-embeddings.asciidoc:54 [source, python] ---- resp = client.ingest.put_pipeline( id="my-text-embeddings-pipeline", description="Text embedding pipeline", processors=[ { "inference": { "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", "target_field": "my_embeddings", "field_map": { "my_text_field": "text_field" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25737fd456fd317cc4cc2db76b6cf28e.asciidoc000066400000000000000000000004221506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:150 [source, python] ---- resp = client.indices.create( index="test-000001", aliases={ "test-alias": { "is_write_index": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2592e5361f7ea3b3dd1840f63d760dae.asciidoc000066400000000000000000000015541506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/mlt-query.asciidoc:67 [source, python] ---- resp = client.search( query={ "more_like_this": { "fields": [ "name.first", "name.last" ], "like": [ { "_index": "marvel", "doc": { "name": { "first": "Ben", "last": "Grimm" }, "_doc": "You got no idea what I'd... what I'd give to be invisible." 
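                    # The artificial document above and the indexed document
                    # referenced by "_id" below can be mixed freely in "like".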
} }, { "_index": "marvel", "_id": "2" } ], "min_term_freq": 1, "max_query_terms": 12 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25981b7b3d55b87e1484586d57b695b1.asciidoc000066400000000000000000000004331506101734100262260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/concurrency-control.asciidoc:24 [source, python] ---- resp = client.index( index="products", id="1567", document={ "product": "r2d2", "details": "A resourceful astromech droid" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25a0dad6547d432f5a3d394528f1c138.asciidoc000066400000000000000000000003411506101734100263300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:401 [source, python] ---- resp = client.get( index="my-index-000001", id="2", routing="user1", stored_fields="tags,counter", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25ae1a698f867ba5139605cc952436c0.asciidoc000066400000000000000000000013431506101734100262660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:168 [source, python] ---- resp = client.search( index="place", pretty=True, suggest={ "place_suggestion": { "prefix": "tim", "completion": { "field": "suggest", "size": 10, "contexts": { "place_type": [ { "context": "cafe" }, { "context": "restaurants", "boost": 2 } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25c0e66a433a0cd596e0641b752ff6d7.asciidoc000066400000000000000000000002761506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/shards.asciidoc:414 [source, python] ---- resp = client.cat.shards( h="index,shard,prirep,state,unassigned.reason", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25cb9e1da00dfd971065ce182467434d.asciidoc000066400000000000000000000002651506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/voting-exclusions.asciidoc:122 [source, python] ---- resp = client.cluster.delete_voting_config_exclusions() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25d40d3049e57e2bb70c2c5b88bd7b87.asciidoc000066400000000000000000000004571506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/delayed.asciidoc:95 [source, python] ---- resp = client.indices.put_settings( index="_all", settings={ "settings": { "index.unassigned.node_left.delayed_timeout": "0" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/25ecfe423548ac1d7cc86de4a18c48c6.asciidoc000066400000000000000000000015571506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/pattern-replace-charfilter.asciidoc:54 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "char_filter": [ "my_char_filter" ] } }, "char_filter": { "my_char_filter": { "type": "pattern_replace", "pattern": "(\\d+)-(?=\\d)", "replacement": "$1_" } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="My credit card is 123-456-789", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/25ed47fcb890fcf8d8518ae067362d18.asciidoc000066400000000000000000000007311506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
aggregations/metrics/median-absolute-deviation-aggregation.asciidoc:31 [source, python] ---- resp = client.search( index="reviews", size=0, aggs={ "review_average": { "avg": { "field": "rating" } }, "review_variability": { "median_absolute_deviation": { "field": "rating" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/261480571394632db40e88fbb6c59c2f.asciidoc000066400000000000000000000003141506101734100262640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/delete-role-mappings.asciidoc:52 [source, python] ---- resp = client.security.delete_role_mapping( name="mapping1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/26168987f799cdc4ee4151c85ba7afc5.asciidoc000066400000000000000000000005521506101734100265320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:213 [source, python] ---- resp = client.search( index="my-index-000001", filter_path="aggregations", aggs={ "my-num-field-stats": { "stats": { "field": "my-num-field" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/262196e4323dfc1f8e6daf77d7ba3b6a.asciidoc000066400000000000000000000005441506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-gcs.asciidoc:217 [source, python] ---- resp = client.snapshot.create_repository( name="my_gcs_repository", repository={ "type": "gcs", "settings": { "bucket": "my_other_bucket", "base_path": "dev" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2623eb122cc0299b42fc9eca6e7f5e56.asciidoc000066400000000000000000000002731506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-builtin-privileges.asciidoc:64 [source, python] ---- resp = client.security.get_builtin_privileges() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/262a778d754add491fbc9c721ac25bf0.asciidoc000066400000000000000000000004121506101734100265530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/whitespace-analyzer.asciidoc:14 [source, python] ---- resp = client.indices.analyze( analyzer="whitespace", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/26419320085434680142567d5fda9c35.asciidoc000066400000000000000000000006251506101734100257630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/ipprefix-aggregation.asciidoc:340 [source, python] ---- resp = client.search( index="network-traffic", size=0, aggs={ "ipv4-subnets": { "ip_prefix": { "field": "ipv4", "prefix_length": 24, "min_doc_count": 3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2643b8c512cb3f3449259cdf498c6ab5.asciidoc000066400000000000000000000015071506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:525 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } }, { "product": { "terms": { "field": "product" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2646710ece0c4c843aebeacd370d0396.asciidoc000066400000000000000000000007431506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
mapping/types/dense-vector.asciidoc:141 [source, python] ---- resp = client.indices.create( index="my-byte-quantized-index", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 3, "index": True, "index_options": { "type": "int8_hnsw" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/268151ed1f0e12586e66e614b61d7981.asciidoc000066400000000000000000000011321506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-polygon-query.asciidoc:122 [source, python] ---- resp = client.search( query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_polygon": { "person.location": { "points": [ "drn5x1g8cu2y", "30, -80", "20, -90" ] } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/26abfc49c238c2b5d259983ac38dbcee.asciidoc000066400000000000000000000005551506101734100267320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/recipes/stemming.asciidoc:173 [source, python] ---- resp = client.search( index="index", query={ "simple_query_string": { "fields": [ "body" ], "quote_field_suffix": ".exact", "query": "\"ski\"" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/26bd8c027c82cd72c007c10fa66dc97f.asciidoc000066400000000000000000000004361506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:438 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="my_snapshot_2099.05.06", indices="*", include_global_state=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/26d3ab748a855eb383e992eb1ff79662.asciidoc000066400000000000000000000003551506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/delete-async-eql-search-api.asciidoc:20 [source, python] ---- resp = client.eql.delete( id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/26f237f9bf14e8b972cc33ff6aebefa2.asciidoc000066400000000000000000000004321506101734100270100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc:35 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "stemmer" ], text="fox running and jumping", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/270549e6b062228312c4e7a54a2c2209.asciidoc000066400000000000000000000002651506101734100261030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/task-queue-backlog.asciidoc:55 [source, python] ---- resp = client.nodes.hot_threads() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2716453454dbf9c6dde2ea6850a62214.asciidoc000066400000000000000000000012211506101734100263310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/alias.asciidoc:12 [source, python] ---- resp = client.indices.create( index="trips", mappings={ "properties": { "distance": { "type": "long" }, "route_length_miles": { "type": "alias", "path": "distance" }, "transit_mode": { "type": "keyword" } } }, ) print(resp) resp1 = client.search( query={ "range": { "route_length_miles": { "gte": 39 } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/271fe0b452b62189505ce4a1d6f8bde1.asciidoc000066400000000000000000000005631506101734100264740ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // aggregations/metrics/percentile-aggregation.asciidoc:110 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_outlier": { "percentiles": { "field": "load_time", "keyed": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2720e613d520ce352b62e990c2d283f7.asciidoc000066400000000000000000000003041506101734100262460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/remove-policy-from-index.asciidoc:93 [source, python] ---- resp = client.ilm.remove_policy( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/272e27bf1fcc4fe5dbd4092679dd0342.asciidoc000066400000000000000000000003311506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:604 [source, python] ---- resp = client.indices.add_block( index=".ml-anomalies-custom-example", block="write", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2731a8577ad734a732d784c5dcb1225d.asciidoc000066400000000000000000000021501506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1359 [source, python] ---- resp = client.indices.create( index="norwegian_example", settings={ "analysis": { "filter": { "norwegian_stop": { "type": "stop", "stopwords": "_norwegian_" }, "norwegian_keywords": { "type": "keyword_marker", "keywords": [ "eksempel" ] }, "norwegian_stemmer": { "type": "stemmer", "language": "norwegian" } }, "analyzer": { "rebuilt_norwegian": { "tokenizer": "standard", "filter": [ "lowercase", "norwegian_stop", "norwegian_keywords", "norwegian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/27384266370152add76471dd0332a2f1.asciidoc000066400000000000000000000014121506101734100261000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/update-transform.asciidoc:263 [source, python] ---- resp = client.transform.update_transform( transform_id="simple-kibana-ecomm-pivot", source={ "index": "kibana_sample_data_ecommerce", "query": { "term": { "geoip.continent_name": { "value": "Asia" } } } }, description="Maximum priced ecommerce data by customer_id in Asia", dest={ "index": "kibana_sample_data_ecommerce_transform_v2", "pipeline": "add_timestamp_pipeline" }, frequency="15m", sync={ "time": { "field": "order_date", "delay": "120s" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2740b69e7246ac6d1ad249382f21d534.asciidoc000066400000000000000000000010471506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/aggregate-metric-double.asciidoc:26 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "my-agg-metric-field": { "type": "aggregate_metric_double", "metrics": [ "min", "max", "sum", "value_count" ], "default_metric": "max" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/274feaaa727e0ddf61b3c0f093182839.asciidoc000066400000000000000000000010341506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:414 [source, python] ---- resp = client.search( index="my-index-000001", runtime_mappings={ "duration": { "type": "long", "script": { "source": "\n emit(doc['measures.end'].value - doc['measures.start'].value);\n " } } }, aggs={ "duration_stats": { "stats": { "field": "duration" } } }, ) print(resp) 
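# The "duration" runtime field is computed per document at search time as
# measures.end - measures.start; the stats aggregation then summarizes it.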
---- python-elasticsearch-9.1.1/docs/examples/275ec358d5d1e4b9ff06cb4ae7e47650.asciidoc000066400000000000000000000002731506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template.asciidoc:84 [source, python] ---- resp = client.indices.get_index_template( name="temp*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/27600d6a78623b69689d4218618e4278.asciidoc000066400000000000000000000004121506101734100260030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/unsigned_long.asciidoc:47 [source, python] ---- resp = client.search( index="my_index", query={ "term": { "my_counter": 18446744073709551615 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/276e5b71ff5c6879a9b819076ad82301.asciidoc000066400000000000000000000025011506101734100262990ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:33 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_dense_vector": { "type": "dense_vector", "index": False, "dims": 3 }, "my_byte_dense_vector": { "type": "dense_vector", "index": False, "dims": 3, "element_type": "byte" }, "status": { "type": "keyword" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "my_dense_vector": [ 0.5, 10, 6 ], "my_byte_dense_vector": [ 0, 10, 6 ], "status": "published" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "my_dense_vector": [ -0.5, 10, 10 ], "my_byte_dense_vector": [ 0, 10, 10 ], "status": "published" }, ) print(resp2) resp3 = client.indices.refresh( index="my-index-000001", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/277fefe2b623af61f8274f73efc97aed.asciidoc000066400000000000000000000012611506101734100267440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/dissect-syntax.asciidoc:115 [source, python] ---- resp = client.scripts_painless_execute( script={ "source": "\n String response=dissect('%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{response} %{size}').extract(doc[\"message\"].value)?.response;\n if (response != null) emit(Integer.parseInt(response)); \n " }, context="long_field", context_setup={ "index": "my-index", "document": { "message": "247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] \"GET /images/hm_nbg.jpg HTTP/1.0\" 304 0" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/278d5bfa1a01f91d5c84679ef1bca390.asciidoc000066400000000000000000000002651506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/concurrency-control.asciidoc:61 [source, python] ---- resp = client.get( index="products", id="1567", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2793fa53b7d269852aa74f6bf57e34dc.asciidoc000066400000000000000000000013431506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/ngram-tokenfilter.asciidoc:208 [source, python] ---- resp = client.indices.create( index="ngram_custom_example", settings={ "index": { "max_ngram_diff": 2 }, "analysis": { "analyzer": { "default": { "tokenizer": "whitespace", "filter": [ "3_5_grams" ] } }, "filter": { "3_5_grams": { "type": "ngram", "min_gram": 3, "max_gram": 5 } } } }, ) print(resp) ----
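// Hand-written companion sketch (not from the generated set): the custom
// analyzer defined above (whitespace tokenizer plus the "3_5_grams" filter)
// can be exercised with the analyze API to inspect the emitted 3- to
// 5-character grams. The sample text is arbitrary.

[source, python]
----
resp = client.indices.analyze(
    index="ngram_custom_example",
    analyzer="default",
    text="elasticsearch",
)
print(resp)
----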
python-elasticsearch-9.1.1/docs/examples/279e2b29261971999923fdc658bba8ff.asciidoc000066400000000000000000000006271506101734100264050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:556 [source, python] ---- resp = client.search( source={ "includes": [ "obj1.*", "obj2.*" ], "excludes": [ "*.description" ] }, query={ "term": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/27f9f604e7a48799fa30529cbc0ff619.asciidoc000066400000000000000000000013451506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc:173 [source, python] ---- resp = client.indices.create( index="delimited_payload_example", settings={ "analysis": { "analyzer": { "whitespace_plus_delimited": { "tokenizer": "whitespace", "filter": [ "plus_delimited" ] } }, "filter": { "plus_delimited": { "type": "delimited_payload", "delimiter": "+", "encoding": "int" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/282e9e845b606f29a5bba174ae4c4c4d.asciidoc000066400000000000000000000013401506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-security.asciidoc:40 [source, python] ---- resp = client.security.create_api_key( name="my-restricted-api-key", expiration="7d", role_descriptors={ "my-restricted-role-descriptor": { "indices": [ { "names": [ "website-product-search" ], "privileges": [ "read" ] } ], "restriction": { "workflows": [ "search_application_query" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/28415647fced5f983b42f8435332a625.asciidoc000066400000000000000000000010351506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:157 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "lowercase": { "field": "my-keyword-field" } } ] }, docs=[ { "_source": { "my-keyword-field": "FOO" } }, { "_source": { "my-keyword-field": "BAR" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/28543836b62b5622a402e6f7731d68f0.asciidoc000066400000000000000000000005211506101734100261200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:421 [source, python] ---- resp = client.indices.downsample( index=".ds-my-data-stream-2023.07.26-000001", target_index=".ds-my-data-stream-2023.07.26-000001-downsample", config={ "fixed_interval": "1h" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2856a5ceff1861aa9a78099f1c517fe7.asciidoc000066400000000000000000000002651506101734100265260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/troubleshooting.asciidoc:14 [source, python] ---- resp = client.indices.get_mapping( index=".watches", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2864a24608b3ac59d21f604f8a31d131.asciidoc000066400000000000000000000007611506101734100262450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/jwt-realm.asciidoc:504 [source, python] ---- resp = client.security.put_role( name="jwt_role1", refresh=True, cluster=[ "manage" ], indices=[ { "names": [ "*" ], "privileges": [ "read" ] } ], run_as=[ "user123_runas" ], metadata={ "version": 1 }, ) print(resp) ---- 
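// Hand-written companion sketch (not from the generated set): the "jwt_role1"
// role created above can be read back to confirm its cluster privileges,
// index privileges, and run_as entry.

[source, python]
----
resp = client.security.get_role(
    name="jwt_role1",
)
print(resp)
----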
python-elasticsearch-9.1.1/docs/examples/2864d04bf99860ed5dbe1458f1ab5f78.asciidoc000066400000000000000000000004621506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/put-autoscaling-policy.asciidoc:22 [source, python] ---- resp = client.autoscaling.put_autoscaling_policy( name="", policy={ "roles": [], "deciders": { "fixed": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2879d7bf4167194b102bf97117327164.asciidoc000066400000000000000000000007461506101734100260570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/htmlstrip-charfilter.asciidoc:64 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "char_filter": [ "html_strip" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2884eacac3ad05ff794f5296ec7427e7.asciidoc000066400000000000000000000006051506101734100266030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/knn-query.asciidoc:58 [source, python] ---- resp = client.search( index="my-image-index", size=3, query={ "knn": { "field": "image-vector", "query_vector": [ -5, 9, -12 ], "k": 10 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2891aa10ee9d474780adf94d5607f2db.asciidoc000066400000000000000000000005001506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:177 [source, python] ---- resp = client.search( index="index_long,index_double", sort=[ { "field": { "numeric_type": "double" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2897ccc2a3bf3d0cd89328ee4413fae5.asciidoc000066400000000000000000000004031506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:605 [source, python] ---- resp = client.async_search.get( id="FklQYndoTDJ2VEFlMEVBTzFJMGhJVFEaLVlKYndBWWZSMUdicUc4WVlEaFl4ZzoxNTU=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2898cf033b5bdefdbe3723af850b25c5.asciidoc000066400000000000000000000013001506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:53 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": "GET /search" } }, collapse={ "field": "user.id", "inner_hits": { "name": "most_recent", "size": 5, "sort": [ { "@timestamp": "desc" } ] }, "max_concurrent_group_searches": 4 }, sort=[ { "http.response.bytes": { "order": "desc" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/28ac880057135e46b3b00c7f3976538c.asciidoc000066400000000000000000000004221506101734100262010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/filtering.asciidoc:122 [source, python] ---- resp = client.indices.put_settings( index="test", settings={ "index.routing.allocation.include._ip": "192.168.2.*" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/291110f4cac02f4610d0853f5800a70d.asciidoc000066400000000000000000000010271506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/weighted-avg-aggregation.asciidoc:214 [source, python] ---- resp = client.search( index="exams", size=0, aggs={ "weighted_grade": { 
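            # "missing" below supplies fallbacks: documents without a grade
            # count as 2, and documents without a weight count as 3.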
"weighted_avg": { "value": { "field": "grade", "missing": 2 }, "weight": { "field": "weight", "missing": 3 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2932e6f71e247cf52e11d2f38f114ddf.asciidoc000066400000000000000000000004331506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:300 [source, python] ---- resp = client.reindex( slices="5", refresh=True, source={ "index": "my-index-000001" }, dest={ "index": "my-new-index-000001" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/295b3aaeb223612afdd991744dc9c873.asciidoc000066400000000000000000000006211506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:489 [source, python] ---- resp = client.ingest.put_pipeline( id="my_test_scores_pipeline", description="Calculates the total test score", processors=[ { "script": { "source": "ctx.total_score = (ctx.math_score + ctx.verbal_score)" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2968ffb8135f77ba3a9b876dd4918119.asciidoc000066400000000000000000000006071506101734100263750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:134 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "azure-ai-studio-embeddings", "pipeline": "azure_ai_studio_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/29783e5de3a5f3c985cbf11094cf49a0.asciidoc000066400000000000000000000005761506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc:274 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "keyword_repeat", "stemmer", "remove_duplicates" ], text="fox running and jumping", explain=True, attributes="keyword", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/29824032d7d64512d17458fdd687b1f6.asciidoc000066400000000000000000000002761506101734100262210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:144 [source, python] ---- resp = client.tasks.list( parent_task_id="oTUltX4IQMOUUVeiohTt8A:123", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/29953082744b7a36e437b392a6391c81.asciidoc000066400000000000000000000004131506101734100260470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:699 [source, python] ---- resp = client.render_search_template( id="my-search-template", params={ "from": 20, "size": 10 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/299900fb08da80fe455cf3f1bb7d62ee.asciidoc000066400000000000000000000003261506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:102 [source, python] ---- resp = client.indices.get_field_mapping( index="publications", fields="title", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/29aeabacb1fdf5b083d5f091b6d1bd44.asciidoc000066400000000000000000000004431506101734100270400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex.asciidoc:105 [source, python] ---- resp = client.indices.migrate_reindex( reindex={ "source": { "index": "my-data-stream" }, "mode": "upgrade" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/29d9df958de292cec50daaf31844b573.asciidoc000066400000000000000000000003531506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:232 [source, python] ---- resp = client.indices.get_field_mapping( index="my-index-000001,my-index-000002", fields="message", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/29e002ab596bae58712eb048ac1768d1.asciidoc000066400000000000000000000005411506101734100264100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:189 [source, python] ---- resp = client.index( index="my-index-000001", routing="xyz", document={ "@timestamp": "2099-11-15T13:12:00", "message": "You know for search!", "user.id": "xyz" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a1eece9a59ac1773edcf0a932c26de0.asciidoc000066400000000000000000000005121506101734100267650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/oidc-logout-api.asciidoc:53 [source, python] ---- resp = client.security.oidc_logout( token="dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", refresh_token="vLBPvmAB6KvwvJZr27cS", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a21674c40f9b182a8944769d20b2357.asciidoc000066400000000000000000000014771506101734100261320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/rank-vectors.asciidoc:137 [source, python] ---- resp = client.search( index="my-rank-vectors-float", query={ "script_score": { "query": { "match_all": {} }, "script": { "source": "maxSimDotProduct(params.query_vector, 'my_vector')", "params": { "query_vector": [ [ 0.5, 10, 6 ], [ -0.5, 10, 10 ] ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a247e36a86a373bcbf478ac9a588f44.asciidoc000066400000000000000000000005521506101734100265140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:328 [source, python] ---- resp = client.index( index="my-index-000001", routing="kimchy", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a287d213a812b98d8353c563a058cfc.asciidoc000066400000000000000000000005131506101734100263340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/boxplot-aggregation.asciidoc:31 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_boxplot": { "boxplot": { "field": "load_time" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a44d254e6e32abe97515fd2eb34705d.asciidoc000066400000000000000000000004351506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:643 [source, python] ---- resp = client.sql.get_async( id="FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", wait_for_completion_timeout="2s", format="json", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a47d11c6e19c9da5104e738359ea8a8.asciidoc000066400000000000000000000003021506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/migrate-to-data-tiers-routing-guide.asciidoc:208 [source, python] ---- resp = client.ilm.start() 
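# Resumes ILM, which the migrate-to-data-tiers guide stops in an earlier step.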
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a5f7e7d6b92c66e52616845146d2820.asciidoc000066400000000000000000000022001506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/painless-examples.asciidoc:522 [source, python] ---- resp = client.transform.preview_transform( id="index_compare", source={ "index": [ "index1", "index2" ], "query": { "match_all": {} } }, dest={ "index": "compare" }, pivot={ "group_by": { "unique-id": { "terms": { "field": "" } } }, "aggregations": { "compare": { "scripted_metric": { "map_script": "state.doc = new HashMap(params['_source'])", "combine_script": "return state", "reduce_script": " \n if (states.size() != 2) {\n return \"count_mismatch\"\n }\n if (states.get(0).equals(states.get(1))) {\n return \"match\"\n } else {\n return \"mismatch\"\n }\n " } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a67608dadbf220a2f040f3a79d3677d.asciidoc000066400000000000000000000013141506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/attachment.asciidoc:162 [source, python] ---- resp = client.ingest.put_pipeline( id="attachment", description="Extract attachment information including original binary", processors=[ { "attachment": { "field": "data", "remove_binary": False } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="attachment", document={ "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/2a70194ebd2f01a3229a5092513676b3.asciidoc000066400000000000000000000013631506101734100261600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/htmlstrip-charfilter.asciidoc:106 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "char_filter": [ "my_custom_html_strip_char_filter" ] } }, "char_filter": { "my_custom_html_strip_char_filter": { "type": "html_strip", "escaped_tags": [ "b" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a71e2d7f7179dd76183d30789046808.asciidoc000066400000000000000000000007171506101734100261500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/multivalued-fields.asciidoc:224 [source, python] ---- resp = client.bulk( index="mv", refresh=True, operations=[ { "index": {} }, { "a": 1, "b": [ 2, 1 ] }, { "index": {} }, { "a": 2, "b": 3 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a91e1fb8ad93a188fa9d77ec01bc431.asciidoc000066400000000000000000000017111506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-inner-hits.asciidoc:90 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "comments": { "type": "nested" } } }, ) print(resp) resp1 = client.index( index="test", id="1", refresh=True, document={ "title": "Test title", "comments": [ { "author": "kimchy", "number": 1 }, { "author": "nik9000", "number": 2 } ] }, ) print(resp1) resp2 = client.search( index="test", query={ "nested": { "path": "comments", "query": { "match": { "comments.number": 2 } }, "inner_hits": {} } }, ) print(resp2) ---- 
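// Hand-written companion sketch (not from the generated set): the nested inner
// hits returned by the search above are nested under each top-level hit; the
// default inner-hits name is the nested path, "comments".

[source, python]
----
for hit in resp2["hits"]["hits"]:
    for comment in hit["inner_hits"]["comments"]["hits"]["hits"]:
        print(comment["_source"])
----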
python-elasticsearch-9.1.1/docs/examples/2a9747bcfaf1f9491ebd410b3fcb6798.asciidoc000066400000000000000000000004541506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:45 [source, python] ---- resp = client.search( query={ "query_string": { "query": "(new york city) OR (big apple)", "default_field": "content" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2a9d3119a9e26e29220be436b9382955.asciidoc000066400000000000000000000010321506101734100262020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:241 [source, python] ---- resp = client.indices.create( index="mistral-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 1024, "element_type": "float", "similarity": "dot_product" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2aa548b692fc2fe7b6f0d90eb8b2ae29.asciidoc000066400000000000000000000002711506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/delete-watch.asciidoc:66 [source, python] ---- resp = client.watcher.delete_watch( id="my_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2abfe0d3f5593d23d2dfa608b1e2532a.asciidoc000066400000000000000000000015741506101734100266250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:796 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "user_name": { "terms": { "field": "user_name" } } }, { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ac37c3c572170ded67f1d5a0c8151ab.asciidoc000066400000000000000000000005011506101734100265310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1204 [source, python] ---- resp = client.eql.search( index="my-data-stream", tiebreaker_field="event.sequence", query="\n process where process.name == \"cmd.exe\" and stringContains(process.executable, \"System32\")\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ac7efe3919ee0c7971f5d502f482662.asciidoc000066400000000000000000000014271506101734100264420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:159 [source, python] ---- resp = client.search( index="my-index-000001", query={ "script_score": { "query": { "bool": { "filter": { "term": { "status": "published" } } } }, "script": { "source": "1 / (1 + l1norm(params.queryVector, 'my_dense_vector'))", "params": { "queryVector": [ 4, 3.4, -0.2 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2acf75803494fef29f9ca70671aa6be1.asciidoc000066400000000000000000000003621506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-delete-roles.asciidoc:100 [source, python] ---- resp = client.security.bulk_delete_role( names=[ "my_admin_role", "superuser" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ad35a13262f98574a48f88b4a838512.asciidoc000066400000000000000000000003051506101734100262060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/alias-privileges.asciidoc:92 [source, python] ---- resp = 
client.get( index="current_year", id="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ade05fb3fb06a67df25e097dfadb045.asciidoc000066400000000000000000000003101506101734100267600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/range-enrich-policy-type-ex.asciidoc:125 [source, python] ---- resp = client.get( index="my-index-000001", id="my_id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2aec92bc31bc24bce58d983738f9e0fe.asciidoc000066400000000000000000000007101506101734100267260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/matrix-stats-aggregation.asciidoc:128 [source, python] ---- resp = client.search( aggs={ "matrixstats": { "matrix_stats": { "fields": [ "poverty", "income" ], "missing": { "income": 50000 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2afc1231679898bd864d06679d9e951b.asciidoc000066400000000000000000000016111506101734100263120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline.asciidoc:202 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "histo": { "date_histogram": { "field": "date", "calendar_interval": "day" }, "aggs": { "categories": { "terms": { "field": "category" } }, "min_bucket_selector": { "bucket_selector": { "buckets_path": { "count": "categories._bucket_count" }, "script": { "source": "params.count != 0" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2afd49985950cbcccf727fa858d00067.asciidoc000066400000000000000000000007501506101734100265170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/semantic-text.asciidoc:159 [source, python] ---- resp = client.indices.create( index="test-index", query={ "match": { "my_field": "Which country is Paris in?" 
} }, highlight={ "fields": { "my_field": { "type": "semantic", "number_of_fragments": 2, "order": "score" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2afdf0d83724953aa2875b5fb37d60cc.asciidoc000066400000000000000000000004161506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:384 [source, python] ---- resp = client.esql.async_query_get( id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", wait_for_completion_timeout="30s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2b1c560f00d9bcf5caaf56c03f6b5962.asciidoc000066400000000000000000000004221506101734100266220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connector-sync-jobs-api.asciidoc:85 [source, python] ---- resp = client.perform_request( "GET", "/_connector/_sync_job", params={ "job_type": "full,incremental" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2b47be4b712147a429102aef386470ee.asciidoc000066400000000000000000000005541506101734100263310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/detect-threats-with-eql.asciidoc:277 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n sequence by process.pid\n [process where process.name == \"regsvr32.exe\"]\n [library where dll.name == \"scrobj.dll\"]\n [network where true]\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2b59b014349d45bf894aca90b2b1fbe0.asciidoc000066400000000000000000000003141506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-a-data-stream.asciidoc:377 [source, python] ---- resp = client.indices.delete_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2b5a5f8689f04d095fa86570130ee4d4.asciidoc000066400000000000000000000007401506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:22 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_id": { "type": "keyword" }, "my_join_field": { "type": "join", "relations": { "question": "answer" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2b5c69778eb3daba9fbd7242bcc2daf9.asciidoc000066400000000000000000000016721506101734100270710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-api-key.asciidoc:729 [source, python] ---- resp = client.security.query_api_keys( size=0, query={ "bool": { "filter": { "term": { "invalidated": True } } } }, aggs={ "invalidated_keys": { "composite": { "sources": [ { "username": { "terms": { "field": "username" } } }, { "key_name": { "terms": { "field": "name" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2b7687e3d7c06824950e00618c297864.asciidoc000066400000000000000000000003131506101734100260560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/resolve-cluster.asciidoc:205 [source, python] ---- resp = client.indices.resolve_cluster( name="my-index*,clust*:my-index*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ba15c066d55a9b26d49b09471151cb4.asciidoc000066400000000000000000000034651506101734100263320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/adjacency-matrix-aggregation.asciidoc:36 [source, python] ---- resp = 
client.bulk( index="emails", refresh=True, operations=[ { "index": { "_id": 1 } }, { "accounts": [ "hillary", "sidney" ] }, { "index": { "_id": 2 } }, { "accounts": [ "hillary", "donald" ] }, { "index": { "_id": 3 } }, { "accounts": [ "vladimir", "donald" ] } ], ) print(resp) resp1 = client.search( index="emails", size=0, aggs={ "interactions": { "adjacency_matrix": { "filters": { "grpA": { "terms": { "accounts": [ "hillary", "sidney" ] } }, "grpB": { "terms": { "accounts": [ "donald", "mitt" ] } }, "grpC": { "terms": { "accounts": [ "vladimir", "nigel" ] } } } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/2bacdcb278705d944f367cfb984cf4d2.asciidoc000066400000000000000000000010731506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:32 [source, python] ---- resp = client.search( index="my-index-000001", sort=[ { "post_date": { "order": "asc", "format": "strict_date_optional_time_nanos" } }, "user", { "name": "desc" }, { "age": "desc" }, "_score" ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2bc1d52efec2076dc9fc2a3a2d90e8ab.asciidoc000066400000000000000000000005771506101734100270530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/boxplot-aggregation.asciidoc:177 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_boxplot": { "boxplot": { "field": "load_time", "execution_hint": "high_accuracy" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2bc57cd3f32b59b0b44ca63b19cdfcc0.asciidoc000066400000000000000000000010371506101734100267570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:623 [source, python] ---- resp = client.search( index="image-index", knn={ "field": "image-vector", "query_vector": [ 1, 5, -20 ], "k": 5, "num_candidates": 50, "similarity": 36, "filter": { "term": { "file-type": "png" } } }, fields=[ "title" ], source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c079d1ae4819a0c206b9e1aa5623523.asciidoc000066400000000000000000000027371506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/passthrough.asciidoc:11 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "attributes": { "type": "passthrough", "priority": 10, "properties": { "id": { "type": "keyword" } } } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "attributes": { "id": "foo", "zone": 10 } }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "bool": { "must": [ { "match": { "id": "foo" } }, { "match": { "zone": 10 } } ] } }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "bool": { "must": [ { "match": { "attributes.id": "foo" } }, { "match": { "attributes.zone": 10 } } ] } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/2c090fe7ec7b66b3f5c178d71c46323b.asciidoc000066400000000000000000000005541506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:403 [source, python] ---- resp = client.indices.stats( metric="fielddata", human=True, fields="my_join_field", ) print(resp) resp1 = client.nodes.stats( metric="indices", index_metric="fielddata", human=True, fields="my_join_field", ) print(resp1) 
---- python-elasticsearch-9.1.1/docs/examples/2c0dbdcf400cde5d36f7c9e6c1101011.asciidoc000066400000000000000000000002441506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/health.asciidoc:107 [source, python] ---- resp = client.cat.health( v=True, ts=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c1e16e9ac24cfea979af2a69900d3c2.asciidoc000066400000000000000000000004021506101734100266320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/put-synonym-rule.asciidoc:113 [source, python] ---- resp = client.synonyms.put_synonym_rule( set_id="my-synonyms-set", rule_id="test-1", synonyms="hello, hi, howdy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c27a8eb6528126f37a843d434cd88b6.asciidoc000066400000000000000000000006261506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:39 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "synonym_graph", "synonyms": [ "dns, domain name system" ] } ], text="domain name system is fragile", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c3207c0c985d253b2ecccc14e69e25a.asciidoc000066400000000000000000000003541506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:412 [source, python] ---- resp = client.indices.add_block( index=".ds-my-data-stream-2023.07.26-000001", block="write", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c3dff44904d3d73ff47f1afe89c7f86.asciidoc000066400000000000000000000004251506101734100266720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:375 [source, python] ---- resp = client.update_by_query( index="my-index-000001", query={ "term": { "user.id": "kimchy" } }, max_docs=1, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c44657adf550b8ade5cf5334106d38b.asciidoc000066400000000000000000000010731506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1404 [source, python] ---- resp = client.search( index="my-index-000001", runtime_mappings={ "http.clientip": { "type": "ip", "script": "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n " } }, query={ "match": { "http.clientip": "40.135.0.0" } }, fields=[ "http.clientip" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2c602b4ee8f22cda2cdf19bad31da0af.asciidoc000066400000000000000000000022001506101734100270770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster.asciidoc:59 [source, python] ---- resp = client.nodes.info() print(resp) resp1 = client.nodes.info( node_id="_all", ) print(resp1) resp2 = client.nodes.info( node_id="_local", ) print(resp2) resp3 = client.nodes.info( node_id="_master", ) print(resp3) resp4 = client.nodes.info( node_id="node_name_goes_here", ) print(resp4) resp5 = client.nodes.info( node_id="node_name_goes_*", ) print(resp5) resp6 = client.nodes.info( node_id="10.0.0.3,10.0.0.4", ) print(resp6) resp7 = client.nodes.info( node_id="10.0.0.*", ) print(resp7) resp8 = client.nodes.info( node_id="_all,master:false", ) print(resp8) resp9 = client.nodes.info( node_id="data:true,ingest:true", ) print(resp9) resp10 = client.nodes.info( 
node_id="coordinating_only:true", ) print(resp10) resp11 = client.nodes.info( node_id="master:true,voting_only:false", ) print(resp11) resp12 = client.nodes.info( node_id="rack:2", ) print(resp12) resp13 = client.nodes.info( node_id="ra*:2", ) print(resp13) resp14 = client.nodes.info( node_id="ra*:2*", ) print(resp14) ---- python-elasticsearch-9.1.1/docs/examples/2c86840a46242a38cf82024a9321be46.asciidoc000066400000000000000000000011421506101734100261640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:362 [source, python] ---- resp = client.indices.create( index="my-explicit-mappings-books", mappings={ "dynamic": False, "properties": { "name": { "type": "text" }, "author": { "type": "text" }, "release_date": { "type": "date", "format": "yyyy-MM-dd" }, "page_count": { "type": "integer" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ceded6ee764adf1aaaac0a1cd25ed5f.asciidoc000066400000000000000000000003621506101734100273360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:418 [source, python] ---- resp = client.cat.indices( v=True, health="red", h="index,status,health", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d01a9e5550b525496757f1bd7f0e706.asciidoc000066400000000000000000000005621506101734100262660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:456 [source, python] ---- resp = client.index( index="my-index-000001", id="1", timeout="5m", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d0244c020075595acb625aa5ba8f455.asciidoc000066400000000000000000000013331506101734100263130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/synthetic-source.asciidoc:253 [source, python] ---- resp = client.index( index="idx_keep", id="1", document={ "path": { "to": [ { "foo": [ 3, 2, 1 ] }, { "foo": [ 30, 20, 10 ] } ], "bar": "baz" }, "ids": [ 200, 100, 300, 100 ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d150ff3b6b991b58fea6aa5cc669aa3.asciidoc000066400000000000000000000005121506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-phrase-query.asciidoc:66 [source, python] ---- resp = client.search( query={ "match_phrase": { "message": { "query": "this is a test", "analyzer": "my_analyzer" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d2f5ec97aa34ff7822a6a1ed08ef335.asciidoc000066400000000000000000000020031506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:423 [source, python] ---- resp = client.bulk( index="test", refresh=True, operations=[ { "index": { "_index": "test1" } }, { "s": 1, "m": 3.1415 }, { "index": { "_index": "test1" } }, { "s": 2, "m": 1 }, { "index": { "_index": "test2" } }, { "s": 3.1, "m": 2.71828 } ], ) print(resp) resp1 = client.search( index="test*", filter_path="aggregations", aggs={ "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "s": "asc" } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/2d37b02cbf6d30ae11bf239a54ec9423.asciidoc000066400000000000000000000035751506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT 
// mapping/runtime.asciidoc:316 [source, python] ---- resp = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": {} }, { "@timestamp": 1516729294000, "model_number": "QVKC92Q", "measures": { "voltage": "5.2", "start": "300", "end": "8675309" } }, { "index": {} }, { "@timestamp": 1516642894000, "model_number": "QVKC92Q", "measures": { "voltage": "5.8", "start": "300", "end": "8675309" } }, { "index": {} }, { "@timestamp": 1516556494000, "model_number": "QVKC92Q", "measures": { "voltage": "5.1", "start": "300", "end": "8675309" } }, { "index": {} }, { "@timestamp": 1516470094000, "model_number": "QVKC92Q", "measures": { "voltage": "5.6", "start": "300", "end": "8675309" } }, { "index": {} }, { "@timestamp": 1516383694000, "model_number": "HG537PU", "measures": { "voltage": "4.2", "start": "400", "end": "8625309" } }, { "index": {} }, { "@timestamp": 1516297294000, "model_number": "HG537PU", "measures": { "voltage": "4.0", "start": "400", "end": "8625309" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d60e3bdfee7afbddee149f40450b8b5.asciidoc000066400000000000000000000004631506101734100270570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/validate.asciidoc:149 [source, python] ---- resp = client.indices.validate_query( index="my-index-000001", query={ "query_string": { "query": "@timestamp:foo", "lenient": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d8fcb03de417a71e7888bbdd948a692.asciidoc000066400000000000000000000002611506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/transforms.asciidoc:197 [source, python] ---- resp = client.cat.transforms( v=True, format="json", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2d9b30acd6b5683f39d53494c0dd779c.asciidoc000066400000000000000000000003001506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/restart-cluster.asciidoc:147 [source, python] ---- resp = client.cat.health() print(resp) resp1 = client.cat.recovery() print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/2dad2b0c8ba503228f4b11cecca0b348.asciidoc000066400000000000000000000004101506101734100266500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:222 [source, python] ---- resp = client.indices.put_data_lifecycle( name="dsl-data-stream", data_retention="7d", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2de6885bacb8769b8f22dce253c96b0c.asciidoc000066400000000000000000000010451506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/intervals-query.asciidoc:424 [source, python] ---- resp = client.search( query={ "intervals": { "my_text": { "match": { "query": "hot porridge", "filter": { "script": { "source": "interval.start > 10 && interval.end < 20 && interval.gaps == 0" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e09666d3ad5ad9afc22763ee6e97a2b.asciidoc000066400000000000000000000005571506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-put.asciidoc:160 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="hourly-snapshots", schedule="1h", name="", repository="my_repository", config={ "indices": [ "data-*", "important" ] }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/2e364833626c9790c042c8f006fcc999.asciidoc000066400000000000000000000014031506101734100262170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/multiplexer-tokenfilter.asciidoc:36 [source, python] ---- resp = client.indices.create( index="multiplexer_example", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "my_multiplexer" ] } }, "filter": { "my_multiplexer": { "type": "multiplexer", "filters": [ "lowercase", "lowercase, porter_stem" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e36fe22051a47e052e349854d9948b9.asciidoc000066400000000000000000000003141506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/explain.asciidoc:198 [source, python] ---- resp = client.explain( index="my-index-000001", id="0", q="message:search", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e3d1b293da93f2a9ecfc26786ec28d6.asciidoc000066400000000000000000000160771506101734100266650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:60 [source, python] ---- resp = client.indices.put_index_template( name="my-data-stream-template", index_patterns=[ "my-data-stream*" ], data_stream={}, template={ "settings": { "index": { "mode": "time_series", "routing_path": [ "kubernetes.namespace", "kubernetes.host", "kubernetes.node", "kubernetes.pod" ], "number_of_replicas": 0, "number_of_shards": 2 } }, "mappings": { "properties": { "@timestamp": { "type": "date" }, "kubernetes": { "properties": { "container": { "properties": { "cpu": { "properties": { "usage": { "properties": { "core": { "properties": { "ns": { "type": "long" } } }, "limit": { "properties": { "pct": { "type": "float" } } }, "nanocores": { "type": "long", "time_series_metric": "gauge" }, "node": { "properties": { "pct": { "type": "float" } } } } } } }, "memory": { "properties": { "available": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } }, "majorpagefaults": { "type": "long" }, "pagefaults": { "type": "long", "time_series_metric": "gauge" }, "rss": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } }, "usage": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" }, "limit": { "properties": { "pct": { "type": "float" } } }, "node": { "properties": { "pct": { "type": "float" } } } } }, "workingset": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } } } }, "name": { "type": "keyword" }, "start_time": { "type": "date" } } }, "host": { "type": "keyword", "time_series_dimension": True }, "namespace": { "type": "keyword", "time_series_dimension": True }, "node": { "type": "keyword", "time_series_dimension": True }, "pod": { "type": "keyword", "time_series_dimension": True } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e7844477b41fcfa9efefee4ec0e7101.asciidoc000066400000000000000000000024201506101734100267260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-using-query-rules.asciidoc:241 [source, python] ---- resp = client.search( index="my-index-000001", retriever={ "rule": { "match_criteria": { "query_string": "puggles", "user_country": "us" }, "ruleset_ids": [ "my-ruleset" ], "retriever": { "rrf": { "retrievers": [ { "standard": { "query": { "query_string": { "query": "pugs" } } } }, { "standard": 
{ "query": { "query_string": { "query": "puggles" } } } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e796e5ca59768d4426abbf9a049db3e.asciidoc000066400000000000000000000004131506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/split-index.asciidoc:175 [source, python] ---- resp = client.indices.split( index="my_source_index", target="my_target_index", settings={ "index.number_of_shards": 2 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e7f4b9be999422a12abb680572b13c8.asciidoc000066400000000000000000000002621506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/get-lifecycle.asciidoc:82 [source, python] ---- resp = client.ilm.get_lifecycle( name="my_policy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e847378ba26aa64d40186b6e3e6a1da.asciidoc000066400000000000000000000006531506101734100264760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/unsigned_long.asciidoc:159 [source, python] ---- resp = client.search( index="my_index", query={ "script_score": { "query": { "match_all": {} }, "script": { "source": "field('my_counter').asBigInteger(BigInteger.ZERO).floatValue()" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2e93eaaebf75fa4a2451e8a76ffa9f20.asciidoc000066400000000000000000000007531506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:105 [source, python] ---- resp = client.indices.put_index_template( name="my-data-stream-template", index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, template={ "mappings": { "properties": { "message": { "type": "text" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ebcdd00ccbf26b4c8e6d9c80dfb3d55.asciidoc000066400000000000000000000007561506101734100271410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:170 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "linestring", "coordinates": [ [ -377.03653, 389.897676 ], [ -377.009051, 389.889939 ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ec8d757188349a4630e120ba2c98c3b.asciidoc000066400000000000000000000006061506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc:36 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "pattern_replace", "pattern": "(dog)", "replacement": "watch$1" } ], text="foxes jump lazy dogs", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ee002e60bd7a38d466e5f0eb0c38946.asciidoc000066400000000000000000000005301506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:375 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "my-index-2099.05.06-000001", "alias": "my-alias", "routing": "1" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ee239df3243c98418f7d9a5c7be4cfd.asciidoc000066400000000000000000000014611506101734100266630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:203 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { 
"my_custom_index_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "my_custom_word_delimiter_graph_filter", "flatten_graph" ] } }, "filter": { "my_custom_word_delimiter_graph_filter": { "type": "word_delimiter_graph", "catenate_all": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2eebaeb3983a04ef7a9201c1f4d40dc1.asciidoc000066400000000000000000000033641506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/dissect-syntax.asciidoc:204 [source, python] ---- resp = client.bulk( index="my-index", refresh=True, operations=[ { "index": {} }, { "timestamp": "2020-04-30T14:30:17-05:00", "message": "40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:30:53-05:00", "message": "232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:12-05:00", "message": "26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:19-05:00", "message": "247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] \"GET /french/splash_inet.html HTTP/1.0\" 200 3781" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:22-05:00", "message": "247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] \"GET /images/hm_nbg.jpg HTTP/1.0\" 304 0" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:27-05:00", "message": "252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:28-05:00", "message": "not a valid apache log" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f0b2181c434a879a23b4643bdd92575.asciidoc000066400000000000000000000005361506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-settings.asciidoc:82 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001,my-index-000002", ) print(resp) resp1 = client.indices.get_settings( index="_all", ) print(resp1) resp2 = client.indices.get_settings( index="log_2099_*", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/2f195eeb93229e40c4d8f1a6ab4a358c.asciidoc000066400000000000000000000012431506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/fingerprint.asciidoc:39 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "fingerprint": { "fields": [ "user" ] } } ] }, docs=[ { "_source": { "user": { "last_name": "Smith", "first_name": "John", "date_of_birth": "1980-01-15", "is_active": True } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f2580ea420e1836d922fe48fa8ada97.asciidoc000066400000000000000000000003511506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc:39 [source, python] ---- resp = client.ccr.delete_auto_follow_pattern( name="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f2fd35905feef0b561c05d70c7064c1.asciidoc000066400000000000000000000002771506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:570 [source, python] ---- resp = client.indices.get_mapping( index="my-index-000001", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/2f4a55dfeba8851b306ef9c1b216ef54.asciidoc000066400000000000000000000004011506101734100266320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/flattened.asciidoc:85 [source, python] ---- resp = client.search( index="bug_reports", query={ "term": { "labels.release": "v1.3.0" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f4e28c81db47547ad39d0926babab12.asciidoc000066400000000000000000000021331506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:689 [source, python] ---- resp = client.indices.create( index="estonian_example", settings={ "analysis": { "filter": { "estonian_stop": { "type": "stop", "stopwords": "_estonian_" }, "estonian_keywords": { "type": "keyword_marker", "keywords": [ "näide" ] }, "estonian_stemmer": { "type": "stemmer", "language": "estonian" } }, "analyzer": { "rebuilt_estonian": { "tokenizer": "standard", "filter": [ "lowercase", "estonian_stop", "estonian_keywords", "estonian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f72a63c73dd672ac2dc3997ad15dd41.asciidoc000066400000000000000000000010261506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/semantic-text.asciidoc:242 [source, python] ---- resp = client.indices.create( index="test-index", mappings={ "properties": { "source_field": { "type": "text", "fields": { "infer_field": { "type": "semantic_text", "inference_id": ".elser-2-elasticsearch" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f9574fee2ebecd6f7d917ee99b26bcc.asciidoc000066400000000000000000000006601506101734100271140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/doc-values.asciidoc:65 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "status_code": { "type": "keyword" }, "session_id": { "type": "keyword", "doc_values": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f98924c3d593ea2b60edb9cef5bee22.asciidoc000066400000000000000000000002741506101734100267330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:484 [source, python] ---- resp = client.indices.forcemerge( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2f9ee29fe49f7d206a41212aa5945296.asciidoc000066400000000000000000000010331506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/create-index-from-source.asciidoc:117 [source, python] ---- resp = client.indices.create_from( source="my-index", dest="my-new-index", create_from={ "settings_override": { "index": { "blocks.write": None, "blocks.read": None, "blocks.read_only": None, "blocks.read_only_allow_delete": None, "blocks.metadata": None } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2fa45d74ba9933188c4728f8a9e5372c.asciidoc000066400000000000000000000007531506101734100263710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:227 [source, python] ---- resp = client.cluster.put_settings( persistent={ "action.auto_create_index": "my-index-000001,index10,-index1*,+ind*" }, ) print(resp) resp1 = client.cluster.put_settings( persistent={ "action.auto_create_index": "false" }, ) print(resp1) resp2 = client.cluster.put_settings( 
persistent={ "action.auto_create_index": "true" }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/2fa7ded8515b32f26c54394ea598f573.asciidoc000066400000000000000000000016701506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/index-templates.asciidoc:123 [source, python] ---- resp = client.indices.put_index_template( name="template_1", index_patterns=[ "te*", "bar*" ], template={ "settings": { "number_of_shards": 1 }, "mappings": { "_source": { "enabled": True }, "properties": { "host_name": { "type": "keyword" }, "created_at": { "type": "date", "format": "EEE MMM dd HH:mm:ss Z yyyy" } } }, "aliases": { "mydata": {} } }, priority=500, composed_of=[ "component_template1", "runtime_component_template" ], version=3, meta={ "description": "my custom" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2fc2c790a85be29bbcba50bdde1493f4.asciidoc000066400000000000000000000003521506101734100267670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:225 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="my_snapshot_2099.05.06", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2fc80a2ad1ca8b2dcb13ed1895b8e861.asciidoc000066400000000000000000000003331506101734100267040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/update-settings.asciidoc:128 [source, python] ---- resp = client.cluster.put_settings( transient={ "indices.recovery.*": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2fd0b3c132b46aa34cc9d92dd2d4bc85.asciidoc000066400000000000000000000006251506101734100267020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/common-grams-tokenfilter.asciidoc:28 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "common_grams", "common_words": [ "is", "the" ] } ], text="the quick fox is brown", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2fe28d9a91b3081a9ec4601af8fb7b1c.asciidoc000066400000000000000000000015311506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:716 [source, python] ---- resp = client.indices.create( index="test", mappings={ "dynamic": False, "properties": { "text": { "type": "text" } } }, ) print(resp) resp1 = client.index( index="test", refresh=True, document={ "text": "words words", "flag": "bar" }, ) print(resp1) resp2 = client.index( index="test", refresh=True, document={ "text": "words words", "flag": "foo" }, ) print(resp2) resp3 = client.indices.put_mapping( index="test", properties={ "text": { "type": "text" }, "flag": { "type": "text", "analyzer": "keyword" } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/2fea3e324939cc7e9c396964aeee7111.asciidoc000066400000000000000000000005441506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-query.asciidoc:256 [source, python] ---- resp = client.search( query={ "match": { "message": { "query": "to be or not to be", "operator": "and", "zero_terms_query": "all" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2fee452baff92b409cbfc8d71eb5fc0e.asciidoc000066400000000000000000000002241506101734100271360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/nodes.asciidoc:361 [source, python] ---- resp = client.cat.nodes( 
v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/2ffa953b29ed0156c9e610daf66b8e48.asciidoc000066400000000000000000000002651506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-tutorial.asciidoc:410 [source, python] ---- resp = client.ilm.explain_lifecycle( index="timeseries-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/300576666769b78fa6fa26b232837f81.asciidoc000066400000000000000000000003011506101734100261340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/get-autoscaling-capacity.asciidoc:22 [source, python] ---- resp = client.autoscaling.get_autoscaling_capacity() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/305c4cfb2ad4b58b4c319ffbf32336cc.asciidoc000066400000000000000000000007261506101734100267100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:143 [source, python] ---- resp = client.search( index="my-index-000001", script_fields={ "my_doubled_field": { "script": { "lang": "painless", "source": "doc['my_field'].value * params.get('multiplier');", "params": { "multiplier": 2 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3082ae0c3ecdc61808103214631b40c6.asciidoc000066400000000000000000000013711506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/avg-bucket-aggregation.asciidoc:57 [source, python] ---- resp = client.search( size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "avg_monthly_sales": { "avg_bucket": { "buckets_path": "sales_per_month>sales", "gap_policy": "skip", "format": "#,##0.00;(#,##0.00)" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/309f0721145b5c656338a02459c3ff1e.asciidoc000066400000000000000000000005071506101734100261740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rank-feature-query.asciidoc:254 [source, python] ---- resp = client.search( index="test", query={ "rank_feature": { "field": "pagerank", "saturation": { "pivot": 8 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/30abc76a39e551f4b52c65002bb6405d.asciidoc000066400000000000000000000003311506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:285 [source, python] ---- resp = client.security.get_api_key( username="myuser", realm_name="native1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/30bd3c0785f3df4795684754adeb5ecb.asciidoc000066400000000000000000000007201506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:97 [source, python] ---- resp = client.render_search_template( source={ "query": { "match": { "message": "{{query_string}}" } }, "from": "{{from}}", "size": "{{size}}" }, params={ "query_string": "hello world", "from": 20, "size": 10 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/30d051f534aeb884176eedb2c11dac85.asciidoc000066400000000000000000000011021506101734100265350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elasticsearch.asciidoc:176 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="my-elastic-rerank", inference_config={ "service": "elasticsearch", 
"service_settings": { "model_id": ".rerank-v1", "num_threads": 1, "adaptive_allocations": { "enabled": True, "min_number_of_allocations": 1, "max_number_of_allocations": 4 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/30db2702dd0071c72a090b8311d0db09.asciidoc000066400000000000000000000014421506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/tophits-aggregation.asciidoc:201 [source, python] ---- resp = client.search( index="sales", query={ "match": { "body": "elections" } }, aggs={ "top_sites": { "terms": { "field": "domain", "order": { "top_hit": "desc" } }, "aggs": { "top_tags_hits": { "top_hits": {} }, "top_hit": { "max": { "script": { "source": "_score" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/30f3e3b9df46afd12e68bc71f18483b4.asciidoc000066400000000000000000000010211506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:131 [source, python] ---- resp = client.indices.create( index="my-index-000001", ) print(resp) resp1 = client.indices.create( index="my-index-000002", ) print(resp1) resp2 = client.indices.put_mapping( index="my-index-000001,my-index-000002", properties={ "user": { "properties": { "name": { "type": "keyword" } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/30fa37c9575fe81a0ea7c12cfc08e277.asciidoc000066400000000000000000000010201506101734100265510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/copy-to.asciidoc:71 [source, python] ---- resp = client.indices.create( index="bad_example_index", mappings={ "properties": { "field_1": { "type": "text", "copy_to": "field_2" }, "field_2": { "type": "text", "copy_to": "field_3" }, "field_3": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/310bdfb0d0d75bac7bff036a3fe51d4d.asciidoc000066400000000000000000000010231506101734100270250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:145 [source, python] ---- resp = client.ingest.put_pipeline( id="azure_ai_studio_embeddings_pipeline", processors=[ { "inference": { "model_id": "azure_ai_studio_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3166455372f2d96622caff076e91ebe7.asciidoc000066400000000000000000000005341506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:308 [source, python] ---- resp = client.search( index="my-index-000001", query={ "percolate": { "field": "query", "index": "my-index-000001", "id": "2", "version": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/316cd43feb3b86396483903af1a048b1.asciidoc000066400000000000000000000006411506101734100263340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:782 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sale_date": { "date_histogram": { "field": "date", "calendar_interval": "year", "missing": "2000/01/01" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3182f26c61fbe5cf89400804533d5ed2.asciidoc000066400000000000000000000006571506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
search/search-your-data/search-template.asciidoc:808 [source, python] ---- resp = client.render_search_template( id="my-search-template", params={ "query_string": "My string", "text_fields": [ { "user_name": "John" }, { "user_name": "kimchy" } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/31832bd71c31c46a1ccf8d1c210d89d4.asciidoc000066400000000000000000000012041506101734100264540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-multiple-indices.asciidoc:51 [source, python] ---- resp = client.search( index="my-index-*", query={ "bool": { "must": [ { "match": { "user.id": "kimchy" } } ], "must_not": [ { "terms": { "_index": [ "my-index-01" ] } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/318e209cc4d6f306e65cb2f5598a50b1.asciidoc000066400000000000000000000007561506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:194 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "LineString", "coordinates": [ [ -77.03653, 38.897676 ], [ -77.009051, 38.889939 ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/31a79a57b242713edec6795599ba0d5d.asciidoc000066400000000000000000000006311506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/field-mappings.asciidoc:15 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "my_tokens": { "type": "sparse_vector" }, "my_text_field": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/31ab4ec26176857280af630bf84a2823.asciidoc000066400000000000000000000003261506101734100262520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-sp-metadata.asciidoc:48 [source, python] ---- resp = client.security.saml_service_provider_metadata( realm_name="saml1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/31ac1b68dc7c26a1d37350be47ae9381.asciidoc000066400000000000000000000004621506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/completion.asciidoc:12 [source, python] ---- resp = client.indices.create( index="music", mappings={ "properties": { "suggest": { "type": "completion" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/31aed390c30bd4f42a5c56253695e53f.asciidoc000066400000000000000000000006621506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/whitespace-analyzer.asciidoc:131 [source, python] ---- resp = client.indices.create( index="whitespace_example", settings={ "analysis": { "analyzer": { "rebuilt_whitespace": { "tokenizer": "whitespace", "filter": [] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/31f4400716500149cccbc19aa06bff66.asciidoc000066400000000000000000000003621506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/dangling-index-delete.asciidoc:19 [source, python] ---- resp = client.dangling_indices.delete_dangling_index( index_uuid="", accept_data_loss=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/320645d771e952af2a67bb7445c3688d.asciidoc000066400000000000000000000022411506101734100262700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1648 [source, 
python] ---- resp = client.indices.create( index="sorani_example", settings={ "analysis": { "filter": { "sorani_stop": { "type": "stop", "stopwords": "_sorani_" }, "sorani_keywords": { "type": "keyword_marker", "keywords": [ "mînak" ] }, "sorani_stemmer": { "type": "stemmer", "language": "sorani" } }, "analyzer": { "rebuilt_sorani": { "tokenizer": "standard", "filter": [ "sorani_normalization", "lowercase", "decimal_digit", "sorani_stop", "sorani_keywords", "sorani_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32123981430e5a8b34fe14314fc48429.asciidoc000066400000000000000000000004471506101734100261170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-multiple-indices.asciidoc:17 [source, python] ---- resp = client.search( index="my-index-000001,my-index-000002", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3218f8ccd59c8c90349816e0428e8fb8.asciidoc000066400000000000000000000003201506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/circuit-breaker-errors.asciidoc:99 [source, python] ---- resp = client.indices.clear_cache( fielddata=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3250a8d2d2a9619035040e55a03620b9.asciidoc000066400000000000000000000004651506101734100260760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/network/tracers.asciidoc:46 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.http.HttpTracer": "TRACE", "logger.org.elasticsearch.http.HttpBodyTracer": "TRACE" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/327466380bcd55361973b4a96c6dccb2.asciidoc000066400000000000000000000021311506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1698 [source, python] ---- resp = client.indices.create( index="spanish_example", settings={ "analysis": { "filter": { "spanish_stop": { "type": "stop", "stopwords": "_spanish_" }, "spanish_keywords": { "type": "keyword_marker", "keywords": [ "ejemplo" ] }, "spanish_stemmer": { "type": "stemmer", "language": "light_spanish" } }, "analyzer": { "rebuilt_spanish": { "tokenizer": "standard", "filter": [ "lowercase", "spanish_stop", "spanish_keywords", "spanish_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32a7acdfb7046966b28f394476c99126.asciidoc000066400000000000000000000003551506101734100263050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:153 [source, python] ---- resp = client.index( index="example", document={ "location": "POINT (-377.03653 389.897676)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32af23a4b0fea6c81c4688ce5fe4ac35.asciidoc000066400000000000000000000010431506101734100267070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-rank-aggregation.asciidoc:184 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_ranks": { "percentile_ranks": { "field": "load_time", "values": [ 500, 600 ], "hdr": { "number_of_significant_value_digits": 3 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32b7963c5cabbe9cc7d15da62f5edda9.asciidoc000066400000000000000000000005661506101734100270710ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // rest-api/security/update-user-profile-data.asciidoc:124 [source, python] ---- resp = client.security.update_user_profile_data( uid="u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", labels={ "direction": "west" }, data={ "app1": { "font": "large" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32b8a5152b47930f2e16c40c8615c7bb.asciidoc000066400000000000000000000031471506101734100263310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-client.asciidoc:286 [source, python] ---- resp = client.search_application.put( name="my-example-app", search_application={ "indices": [ "example-index" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n {\n \"multi_match\" : {\n \"query\": \"{{query}}\",\n \"fields\": [ \"title^4\", \"plot\", \"actors\", \"directors\" ]\n }\n },\n {\n \"multi_match\" : {\n \"query\": \"{{query}}\",\n \"type\": \"phrase_prefix\",\n \"fields\": [ \"title^4\", \"plot\"]\n }\n },\n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", "params": { "query": "", "_es_filters": {}, "_es_aggs": {}, "_es_sort_fields": {}, "size": 10, "from": 0 }, "dictionary": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32c8c86702ccd68eb70f1573409c2a1f.asciidoc000066400000000000000000000014001506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-searchable-snapshot.asciidoc:130 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_size": "50gb" }, "searchable_snapshot": { "snapshot_repository": "backing_repo", "replicate_for": "14d" } } }, "delete": { "min_age": "28d", "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32cd57666bc80b8cf793d06fa1086669.asciidoc000066400000000000000000000003611506101734100263600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:203 [source, python] ---- resp = client.sql.query( format="tsv", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32ce26b8af95f7ccc2a7bd5e77a39d6c.asciidoc000066400000000000000000000002751506101734100270160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:562 [source, python] ---- resp = client.indices.recovery( index="my-index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/32de5dd306bd014d67053d2f175defcd.asciidoc000066400000000000000000000004001506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/troubleshooting.asciidoc:748 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.xpack.security.authc.saml": "debug" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3312c82f81816bf76629db9582991812.asciidoc000066400000000000000000000013551506101734100260700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/slowlog.asciidoc:135 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ 
"index.search.slowlog.threshold.query.warn": "10s", "index.search.slowlog.threshold.query.info": "5s", "index.search.slowlog.threshold.query.debug": "2s", "index.search.slowlog.threshold.query.trace": "500ms", "index.search.slowlog.threshold.fetch.warn": "1s", "index.search.slowlog.threshold.fetch.info": "800ms", "index.search.slowlog.threshold.fetch.debug": "500ms", "index.search.slowlog.threshold.fetch.trace": "200ms", "index.search.slowlog.include.user": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/331caebf810a923644eb6de26e5a97f4.asciidoc000066400000000000000000000007541506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:417 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_join_field": { "type": "join", "relations": { "question": [ "answer", "comment" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3337c817ebd438254505a31e91c91724.asciidoc000066400000000000000000000002761506101734100261230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-data-stream.asciidoc:77 [source, python] ---- resp = client.indices.get_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3341d3bbb53052447a37c92a04c14b70.asciidoc000066400000000000000000000003571506101734100262300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:356 [source, python] ---- resp = client.update( index="my-index-000001", id="1", script="ctx._source.new_field = 'value_of_new_field'", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3343a4cf559060c422d86c786a95e535.asciidoc000066400000000000000000000004341506101734100262110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/apostrophe-tokenfilter.asciidoc:22 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "apostrophe" ], text="Istanbul'a veya Istanbul'dan", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/334811cfceb6858aeec5b3461717dd63.asciidoc000066400000000000000000000010651506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geoip.asciidoc:188 [source, python] ---- resp = client.ingest.put_pipeline( id="geoip", description="Add ip geolocation info", processors=[ { "geoip": { "field": "ip" } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="geoip", document={ "ip": "80.231.5.0" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/33610800d9de3c3e6d6b3c611ace7330.asciidoc000066400000000000000000000002661506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:134 [source, python] ---- resp = client.tasks.get( task_id="oTUltX4IQMOUUVeiohTt8A:124", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/336613f48dd95ea993dd3bcce264fd0e.asciidoc000066400000000000000000000010171506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-allocate.asciidoc:116 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "cold": { "actions": { "allocate": { "require": { "box_type": "cold", "storage": "high" } } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/33732208fc6e6fe1e8d278299681932e.asciidoc000066400000000000000000000004121506101734100262250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:183 [source, python] ---- resp = client.index( index="example", document={ "location": "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3386fe07e90844dbcdbbe7c07f09e04a.asciidoc000066400000000000000000000003071506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/delete-synonyms-set.asciidoc:66 [source, python] ---- resp = client.synonyms.delete_synonym( id="my-synonyms-set", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/339c4e5af9f9069ad9912aa574488b59.asciidoc000066400000000000000000000022411506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:346 [source, python] ---- resp = client.indices.create( index="my-index-bit-vectors", mappings={ "properties": { "my_dense_vector": { "type": "dense_vector", "index": False, "element_type": "bit", "dims": 40 } } }, ) print(resp) resp1 = client.index( index="my-index-bit-vectors", id="1", document={ "my_dense_vector": [ 8, 5, -15, 1, -7 ] }, ) print(resp1) resp2 = client.index( index="my-index-bit-vectors", id="2", document={ "my_dense_vector": [ -1, 115, -3, 4, -128 ] }, ) print(resp2) resp3 = client.index( index="my-index-bit-vectors", id="3", document={ "my_dense_vector": [ 2, 18, -5, 0, -124 ] }, ) print(resp3) resp4 = client.indices.refresh( index="my-index-bit-vectors", ) print(resp4) ---- python-elasticsearch-9.1.1/docs/examples/33b732bb301e99d2161bd2246494f487.asciidoc000066400000000000000000000010251506101734100261730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/geo-match-enrich-policy-type-ex.asciidoc:95 [source, python] ---- resp = client.ingest.put_pipeline( id="postal_lookup", processors=[ { "enrich": { "description": "Add 'geo_data' based on 'geo_location'", "policy_name": "postal_policy", "field": "geo_location", "target_field": "geo_data", "shape_relation": "INTERSECTS" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/33d480fc6812ada75756cf5337bc9092.asciidoc000066400000000000000000000004241506101734100263460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connector-sync-jobs-api.asciidoc:64 [source, python] ---- resp = client.perform_request( "GET", "/_connector/_sync_job", params={ "from": "0", "size": "2" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/33f148e3d8676de6cc52f58749898a13.asciidoc000066400000000000000000000010451506101734100263170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:278 [source, python] ---- resp = client.search( query={ "dis_max": { "queries": [ { "match_phrase_prefix": { "subject": "quick brown f" } }, { "match_phrase_prefix": { "message": "quick brown f" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/342ddf9121aeddd82fea2464665e25da.asciidoc000066400000000000000000000004461506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/create-connector-api.asciidoc:27 [source, python] ---- resp = client.connector.put( connector_id="my-connector", index_name="search-google-drive", name="My Connector", 
service_type="google_drive", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/343dd09a8c76987e586858be3bdc51eb.asciidoc000066400000000000000000000030061506101734100265260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:574 [source, python] ---- resp = client.indices.create( index="my_queries2", settings={ "analysis": { "analyzer": { "wildcard_suffix": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "reverse", "wildcard_edge_ngram" ] }, "wildcard_suffix_search_time": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "reverse" ] } }, "filter": { "wildcard_edge_ngram": { "type": "edge_ngram", "min_gram": 1, "max_gram": 32 } } } }, mappings={ "properties": { "query": { "type": "percolator" }, "my_field": { "type": "text", "fields": { "suffix": { "type": "text", "analyzer": "wildcard_suffix", "search_analyzer": "wildcard_suffix_search_time" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/344b4144244d57f87c6aa4652b100b25.asciidoc000066400000000000000000000003471506101734100261630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-query.asciidoc:167 [source, python] ---- resp = client.index( index="my-index-000001", id="2", document={ "color": "blue" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/346f28d82acb5427c304aa574fea0008.asciidoc000066400000000000000000000013101506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1847 [source, python] ---- resp = client.indices.create( index="thai_example", settings={ "analysis": { "filter": { "thai_stop": { "type": "stop", "stopwords": "_thai_" } }, "analyzer": { "rebuilt_thai": { "tokenizer": "thai", "filter": [ "lowercase", "decimal_digit", "thai_stop" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3477a89d869b1f7f72d50c2ca86c4679.asciidoc000066400000000000000000000003031506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/activate-watch.asciidoc:88 [source, python] ---- resp = client.watcher.activate_watch( watch_id="my_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3487e60e1ae9d4925ce540cd63574385.asciidoc000066400000000000000000000007601506101734100263000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/boosting-query.asciidoc:18 [source, python] ---- resp = client.search( query={ "boosting": { "positive": { "term": { "text": "apple" } }, "negative": { "term": { "text": "pie tart fruit crumble tree" } }, "negative_boost": 0.5 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/34be27141e3a476c138546190101c8bc.asciidoc000066400000000000000000000003701506101734100261570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-vector-tile-api.asciidoc:38 [source, python] ---- resp = client.search_mvt( index="my-index", field="my-geo-field", zoom="15", x="5271", y="12710", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/34d51c54b62e9a160c0ddacc10134bb0.asciidoc000066400000000000000000000005361506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-first-query.asciidoc:10 [source, python] ---- resp = client.search( query={ "span_first": { "match": { "span_term": { "user.id": "kimchy" } }, "end": 3 } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/34d63740b58209a3d031212909743925.asciidoc000066400000000000000000000010371506101734100256710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:213 [source, python] ---- resp = client.search( index="openai-embeddings", knn={ "field": "content_embedding", "query_vector_builder": { "text_embedding": { "model_id": "openai_embeddings", "model_text": "Calculate fuel cost" } }, "k": 10, "num_candidates": 100 }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35260b615d0b5628c95d7cc814c39bd3.asciidoc000066400000000000000000000007441506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/has-child-query.asciidoc:141 [source, python] ---- resp = client.search( query={ "has_child": { "type": "child", "query": { "function_score": { "script_score": { "script": "_score * doc['click_count'].value" } } }, "score_mode": "max" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/353020cb30a885ee7f5ce2b141ba574a.asciidoc000066400000000000000000000003331506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/prefix-query.asciidoc:58 [source, python] ---- resp = client.search( query={ "prefix": { "user": "ki" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3541d4a85e27b2c3896a7a7ee98b4b37.asciidoc000066400000000000000000000002431506101734100264340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // health/health.asciidoc:486 [source, python] ---- resp = client.health_report( verbose=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3544f17cb97b613a2f733707c676f759.asciidoc000066400000000000000000000015231506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/filter-aggregation.asciidoc:122 [source, python] ---- resp = client.search( index="sales", size="0", filter_path="aggregations", aggs={ "f": { "filters": { "filters": { "hats": { "term": { "type": "hat" } }, "t_shirts": { "term": { "type": "t-shirt" } } } }, "aggs": { "avg_price": { "avg": { "field": "price" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3545261682af72f4bee57f2bac0a9590.asciidoc000066400000000000000000000002621506101734100264200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shard-stores.asciidoc:156 [source, python] ---- resp = client.indices.shard_stores( status="green", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35563ef92dddef9d83906d9c43c60d0f.asciidoc000066400000000000000000000006701506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-termvectors.asciidoc:10 [source, python] ---- resp = client.mtermvectors( docs=[ { "_index": "my-index-000001", "_id": "2", "term_statistics": True }, { "_index": "my-index-000001", "_id": "1", "fields": [ "message" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/355d0ee2fcb6c1fc403c6267f710e25a.asciidoc000066400000000000000000000004601506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:722 [source, python] ---- resp = client.reindex( source={ "index": [ "my-index-000001", "my-index-000002" ] }, dest={ "index": "my-new-index-000002" }, ) print(resp) ---- 
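The multi-source reindex above makes no attempt to reconcile duplicate document IDs between the two sources; if both contain the same ID, which copy ends up in the destination is undefined. When the sources can share IDs, a safer pattern is to reindex one index at a time, sketched below with the same (hypothetical) index names:

[source, python]
----
# One reindex call per source index keeps ID collisions deterministic:
# the second pass simply overwrites documents copied by the first.
for source_index in ["my-index-000001", "my-index-000002"]:
    resp = client.reindex(
        source={"index": source_index},
        dest={"index": "my-new-index-000002"},
    )
    print(resp)
----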
python-elasticsearch-9.1.1/docs/examples/35a272df8c919a12d7c3106a18245748.asciidoc000066400000000000000000000005421506101734100261770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:956 [source, python] ---- resp = client.ml.infer_trained_model( model_id="lang_ident_model_1", docs=[ { "text": "The fool doth think he is wise, but the wise man knows himself to be a fool." } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35be136ba9df7474a5521631e2a385b1.asciidoc000066400000000000000000000003531506101734100263270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/apis/explain-lifecycle.asciidoc:56 [source, python] ---- resp = client.indices.explain_data_lifecycle( index=".ds-metrics-2023.03.22-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35c33ef48cf8a4ee368874141622f9d5.asciidoc000066400000000000000000000007361506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:503 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "strings_as_text": { "match_mapping_type": "string", "mapping": { "type": "text" } } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35c664285f2e8b7d5d50ca37ae3ba794.asciidoc000066400000000000000000000006371506101734100265140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:160 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": "GET /search" } }, collapse={ "field": "user.id" }, sort=[ "user.id" ], search_after=[ "dd5ce1ad" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35eef1765e9a5991d77592a0c7490fe0.asciidoc000066400000000000000000000005101506101734100263610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/min-aggregation.asciidoc:99 [source, python] ---- resp = client.search( index="sales", aggs={ "grade_min": { "min": { "field": "grade", "missing": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35f892b475a1770f18328158be7039fd.asciidoc000066400000000000000000000006051506101734100262200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:71 [source, python] ---- resp = client.indices.create( index="my-index-2", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 3, "similarity": "dot_product" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/35fc63cbefce7bc131ad467b5ba209ef.asciidoc000066400000000000000000000004601506101734100270470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/decrease-data-node-disk-usage.asciidoc:79 [source, python] ---- resp = client.cat.allocation( v=True, s="disk.avail", h="node,disk.percent,disk.avail,disk.total,disk.used,disk.indices,shards", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3608e4fcd17dd8d5f88ec9a3db2f5d89.asciidoc000066400000000000000000000004361506101734100267540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/put-synonyms-set.asciidoc:89 [source, python] ---- resp = client.synonyms.put_synonym( id="my-synonyms-set", synonyms_set=[ { "synonyms": "hello => hi => howdy" } ], ) print(resp) ---- 
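After storing the set, reading it back with the synonyms get API (same set ID) confirms how the rule was parsed:

[source, python]
----
resp = client.synonyms.get_synonym(
    id="my-synonyms-set",
)
print(resp)
----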
python-elasticsearch-9.1.1/docs/examples/360b3cef34bbddc5d9579ca95f0cb061.asciidoc000066400000000000000000000004731506101734100267170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:155 [source, python] ---- resp = client.indices.put_mapping( index="my-data-stream", write_index_only=True, properties={ "message": { "type": "text" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/360c4f373e72ba861584ee85bd218124.asciidoc000066400000000000000000000013441506101734100262630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:262 [source, python] ---- resp = client.indices.create( index="test_index", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "lowercase", "porter_stem" ] } } } }, mappings={ "properties": { "query": { "type": "percolator" }, "body": { "type": "text", "analyzer": "my_analyzer" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3613f402ee63f0efb6b8d9c6a919b410.asciidoc000066400000000000000000000006641506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:133 [source, python] ---- resp = client.esql.query( format="txt", query="\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", filter={ "range": { "page_count": { "gte": 100, "lte": 200 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/362dfccdb6f7933b22c909542e0b4e0a.asciidoc000066400000000000000000000006351506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:221 [source, python] ---- resp = client.update_by_query( index="my-data-stream", query={ "match": { "user.id": "l7gk7f82" } }, script={ "source": "ctx._source.user.id = params.new_id", "params": { "new_id": "XgdX0NoX" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3649194a97d265a3bc758f8b38f7561e.asciidoc000066400000000000000000000007161506101734100263160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-text-hybrid-search:21 [source, python] ---- resp = client.indices.create( index="semantic-embeddings", mappings={ "properties": { "semantic_text": { "type": "semantic_text" }, "content": { "type": "text", "copy_to": "semantic_text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/365256ebdfa47b449780771d9beba8d9.asciidoc000066400000000000000000000003721506101734100265250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/check-in-connector-sync-job-api.asciidoc:56 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/_sync_job/my-connector-sync-job/_check_in", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36792c81c053e0555407d1e83e7e054f.asciidoc000066400000000000000000000055351506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:452 [source, python] ---- resp = client.search( index="movies", size=10, retriever={ "rescorer": { "rescore": { "window_size": 50, "query": { "rescore_query": { "script_score": { "query": { "match_all": {} }, "script": { "source": "cosineSimilarity(params.queryVector, 'product-vector_final_stage') + 1.0", "params": { "queryVector": [ -0.5, 90, -10, 14.8, -156 ] } } } } } }, "retriever": { "rrf": { 
"rank_window_size": 100, "retrievers": [ { "standard": { "query": { "sparse_vector": { "field": "plot_embedding", "inference_id": "my-elser-model", "query": "films that explore psychological depths" } } } }, { "standard": { "query": { "multi_match": { "query": "crime", "fields": [ "plot", "title" ] } } } }, { "knn": { "field": "vector", "query_vector": [ 10, 22, 77 ], "k": 10, "num_candidates": 10 } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36962727b806315b221e8a63e05caddc.asciidoc000066400000000000000000000004621506101734100263320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/explicit-mapping.asciidoc:49 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "employee-id": { "type": "keyword", "index": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36ac0ef9ea63efc431580f7ade8ad53c.asciidoc000066400000000000000000000005641506101734100270050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:78 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "openai-embeddings", "pipeline": "openai_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36b26905c5f96d0b785c3267fb63838d.asciidoc000066400000000000000000000333461506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:422 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "ip": { "type": "ip" }, "version": { "type": "version" }, "missing_keyword": { "type": "keyword" }, "@timestamp": { "type": "date" }, "type_test": { "type": "keyword" }, "@timestamp_pretty": { "type": "date", "format": "dd-MM-yyyy" }, "event_type": { "type": "keyword" }, "event": { "properties": { "category": { "type": "alias", "path": "event_type" } } }, "host": { "type": "keyword" }, "os": { "type": "keyword" }, "bool": { "type": "boolean" }, "uptime": { "type": "long" }, "port": { "type": "long" } } }, ) print(resp) resp1 = client.indices.create( index="my-index-000002", mappings={ "properties": { "ip": { "type": "ip" }, "@timestamp": { "type": "date" }, "@timestamp_pretty": { "type": "date", "format": "yyyy-MM-dd" }, "type_test": { "type": "keyword" }, "event_type": { "type": "keyword" }, "event": { "properties": { "category": { "type": "alias", "path": "event_type" } } }, "host": { "type": "keyword" }, "op_sys": { "type": "keyword" }, "bool": { "type": "boolean" }, "uptime": { "type": "long" }, "port": { "type": "long" } } }, ) print(resp1) resp2 = client.indices.create( index="my-index-000003", mappings={ "properties": { "host_ip": { "type": "ip" }, "@timestamp": { "type": "date" }, "date": { "type": "date" }, "event_type": { "type": "keyword" }, "event": { "properties": { "category": { "type": "alias", "path": "event_type" } } }, "missing_keyword": { "type": "keyword" }, "host": { "type": "keyword" }, "os": { "type": "keyword" }, "bool": { "type": "boolean" }, "uptime": { "type": "long" }, "port": { "type": "long" } } }, ) print(resp2) resp3 = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": { "_id": 1 } }, { "@timestamp": "1234567891", "@timestamp_pretty": "12-12-2022", "missing_keyword": "test", "type_test": "abc", "ip": "10.0.0.1", "event_type": "alert", "host": "doom", "uptime": 0, "port": 1234, "os": "win10", "version": 
"1.0.0", "id": 11 }, { "index": { "_id": 2 } }, { "@timestamp": "1234567892", "@timestamp_pretty": "13-12-2022", "event_type": "alert", "type_test": "abc", "host": "CS", "uptime": 5, "port": 1, "os": "win10", "version": "1.2.0", "id": 12 }, { "index": { "_id": 3 } }, { "@timestamp": "1234567893", "@timestamp_pretty": "12-12-2022", "event_type": "alert", "type_test": "abc", "host": "farcry", "uptime": 1, "port": 1234, "bool": False, "os": "win10", "version": "2.0.0", "id": 13 }, { "index": { "_id": 4 } }, { "@timestamp": "1234567894", "@timestamp_pretty": "13-12-2022", "event_type": "alert", "type_test": "abc", "host": "GTA", "uptime": 3, "port": 12, "os": "slack", "version": "10.0.0", "id": 14 }, { "index": { "_id": 5 } }, { "@timestamp": "1234567895", "@timestamp_pretty": "17-12-2022", "event_type": "alert", "host": "sniper 3d", "uptime": 6, "port": 1234, "os": "fedora", "version": "20.1.0", "id": 15 }, { "index": { "_id": 6 } }, { "@timestamp": "1234568896", "@timestamp_pretty": "17-12-2022", "event_type": "alert", "host": "doom", "port": 65123, "bool": True, "os": "redhat", "version": "20.10.0", "id": 16 }, { "index": { "_id": 7 } }, { "@timestamp": "1234567897", "@timestamp_pretty": "17-12-2022", "missing_keyword": "yyy", "event_type": "failure", "host": "doom", "uptime": 15, "port": 1234, "bool": True, "os": "redhat", "version": "20.2.0", "id": 17 }, { "index": { "_id": 8 } }, { "@timestamp": "1234567898", "@timestamp_pretty": "12-12-2022", "missing_keyword": "test", "event_type": "success", "host": "doom", "uptime": 16, "port": 512, "os": "win10", "version": "1.2.3", "id": 18 }, { "index": { "_id": 9 } }, { "@timestamp": "1234567899", "@timestamp_pretty": "15-12-2022", "missing_keyword": "test", "event_type": "success", "host": "GTA", "port": 12, "bool": True, "os": "win10", "version": "1.2.3", "id": 19 }, { "index": { "_id": 10 } }, { "@timestamp": "1234567893", "missing_keyword": None, "ip": "10.0.0.5", "event_type": "alert", "host": "farcry", "uptime": 1, "port": 1234, "bool": True, "os": "win10", "version": "1.2.3", "id": 110 } ], ) print(resp3) resp4 = client.bulk( index="my-index-000002", refresh=True, operations=[ { "index": { "_id": 1 } }, { "@timestamp": "1234567991", "type_test": "abc", "ip": "10.0.0.1", "event_type": "alert", "host": "doom", "uptime": 0, "port": 1234, "op_sys": "win10", "id": 21 }, { "index": { "_id": 2 } }, { "@timestamp": "1234567992", "type_test": "abc", "event_type": "alert", "host": "CS", "uptime": 5, "port": 1, "op_sys": "win10", "id": 22 }, { "index": { "_id": 3 } }, { "@timestamp": "1234567993", "type_test": "abc", "@timestamp_pretty": "2022-12-17", "event_type": "alert", "host": "farcry", "uptime": 1, "port": 1234, "bool": False, "op_sys": "win10", "id": 23 }, { "index": { "_id": 4 } }, { "@timestamp": "1234567994", "event_type": "alert", "host": "GTA", "uptime": 3, "port": 12, "op_sys": "slack", "id": 24 }, { "index": { "_id": 5 } }, { "@timestamp": "1234567995", "event_type": "alert", "host": "sniper 3d", "uptime": 6, "port": 1234, "op_sys": "fedora", "id": 25 }, { "index": { "_id": 6 } }, { "@timestamp": "1234568996", "@timestamp_pretty": "2022-12-17", "ip": "10.0.0.5", "event_type": "alert", "host": "doom", "port": 65123, "bool": True, "op_sys": "redhat", "id": 26 }, { "index": { "_id": 7 } }, { "@timestamp": "1234567997", "@timestamp_pretty": "2022-12-17", "event_type": "failure", "host": "doom", "uptime": 15, "port": 1234, "bool": True, "op_sys": "redhat", "id": 27 }, { "index": { "_id": 8 } }, { "@timestamp": "1234567998", "ip": "10.0.0.1", 
"event_type": "success", "host": "doom", "uptime": 16, "port": 512, "op_sys": "win10", "id": 28 }, { "index": { "_id": 9 } }, { "@timestamp": "1234567999", "ip": "10.0.0.1", "event_type": "success", "host": "GTA", "port": 12, "bool": False, "op_sys": "win10", "id": 29 } ], ) print(resp4) resp5 = client.bulk( index="my-index-000003", refresh=True, operations=[ { "index": { "_id": 1 } }, { "@timestamp": "1334567891", "host_ip": "10.0.0.1", "event_type": "alert", "host": "doom", "uptime": 0, "port": 12, "os": "win10", "id": 31 }, { "index": { "_id": 2 } }, { "@timestamp": "1334567892", "event_type": "alert", "host": "CS", "os": "win10", "id": 32 }, { "index": { "_id": 3 } }, { "@timestamp": "1334567893", "event_type": "alert", "host": "farcry", "bool": True, "os": "win10", "id": 33 }, { "index": { "_id": 4 } }, { "@timestamp": "1334567894", "event_type": "alert", "host": "GTA", "os": "slack", "bool": True, "id": 34 }, { "index": { "_id": 5 } }, { "@timestamp": "1234567895", "event_type": "alert", "host": "sniper 3d", "os": "fedora", "id": 35 }, { "index": { "_id": 6 } }, { "@timestamp": "1234578896", "host_ip": "10.0.0.1", "event_type": "alert", "host": "doom", "bool": True, "os": "redhat", "id": 36 }, { "index": { "_id": 7 } }, { "@timestamp": "1234567897", "event_type": "failure", "missing_keyword": "test", "host": "doom", "bool": True, "os": "redhat", "id": 37 }, { "index": { "_id": 8 } }, { "@timestamp": "1234577898", "event_type": "success", "host": "doom", "os": "win10", "id": 38, "date": "1671235200000" }, { "index": { "_id": 9 } }, { "@timestamp": "1234577899", "host_ip": "10.0.0.5", "event_type": "success", "host": "GTA", "bool": True, "os": "win10", "id": 39 } ], ) print(resp5) ---- python-elasticsearch-9.1.1/docs/examples/36b86b97feedcf5632824eefc251d6ed.asciidoc000066400000000000000000000003651506101734100267460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:484 [source, python] ---- resp = client.search( index="books", query={ "match": { "name": "brave" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36d229f734adcdab00be266a7ce038b1.asciidoc000066400000000000000000000006051506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:404 [source, python] ---- resp = client.indices.create( index="my-bit-vectors", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 40, "element_type": "bit" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36da9668fef56910370f16bfb772cc40.asciidoc000066400000000000000000000003021506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shard-request-cache.asciidoc:125 [source, python] ---- resp = client.indices.stats( metric="request_cache", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36e09bbd5896498ede0f5d37a18eae2c.asciidoc000066400000000000000000000006011506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/parent-id-query.asciidoc:60 [source, python] ---- resp = client.index( index="my-index-000001", id="2", routing="1", refresh=True, document={ "text": "This is a child document.", "my-join-field": { "name": "my-child", "parent": "1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/36fae9dfc0b815546b45745bac054b67.asciidoc000066400000000000000000000003761506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // mapping/runtime.asciidoc:496 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "model_number": "HG537PU" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/370b297ed3433577adf53e64f572d89d.asciidoc000066400000000000000000000003641506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/delete-connector-sync-job-api.asciidoc:52 [source, python] ---- resp = client.perform_request( "DELETE", "/_connector/_sync_job/my-connector-sync-job-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/371962cf63e65c10026177c6a1bad0b6.asciidoc000066400000000000000000000002471506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/start-slm.asciidoc:63 [source, python] ---- resp = client.slm.start() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3722dad876023e0757138dd5a6d3240e.asciidoc000066400000000000000000000006641506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/create-index-from-source.asciidoc:63 [source, python] ---- resp = client.indices.create( index="my-index", settings={ "index": { "number_of_shards": 3, "blocks.write": True } }, mappings={ "properties": { "field1": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/37530f35f315b9f35e3e6a13cf2a1ccd.asciidoc000066400000000000000000000010731506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:731 [source, python] ---- resp = client.search( aggs={ "actors": { "terms": { "field": "actors", "size": 10, "collect_mode": "breadth_first" }, "aggs": { "costars": { "terms": { "field": "actors", "size": 5 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3758b8f2ab9f6f28a764ee6c42c85766.asciidoc000066400000000000000000000010621506101734100264540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:550 [source, python] ---- resp = client.search( index="my-index-000001", scroll="1m", slice={ "id": 0, "max": 2 }, query={ "match": { "message": "foo" } }, ) print(resp) resp1 = client.search( index="my-index-000001", scroll="1m", slice={ "id": 1, "max": 2 }, query={ "match": { "message": "foo" } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/3759ca688c4bd3c838780a9aad63258b.asciidoc000066400000000000000000000003001506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template.asciidoc:41 [source, python] ---- resp = client.indices.get_index_template( name="template_1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/375bf2c51ce6cc386f9d4d635d5e84a7.asciidoc000066400000000000000000000004651506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:345 [source, python] ---- resp = client.search( index="my_locations", query={ "geo_grid": { "location": { "geohex": "811fbffffffffff" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/376fbc965e1b093f6dbc198a94c83aa9.asciidoc000066400000000000000000000027721506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/common-script-uses.asciidoc:260 [source, python] ---- resp = client.bulk( index="my-index", refresh=True, operations=[ { 
"index": {} }, { "gc": "[2021-04-27T16:16:34.699+0000][82460][gc,heap,exit] class space used 266K, capacity 384K, committed 384K, reserved 1048576K" }, { "index": {} }, { "gc": "[2021-03-24T20:27:24.184+0000][90239][gc,heap,exit] class space used 15255K, capacity 16726K, committed 16844K, reserved 1048576K" }, { "index": {} }, { "gc": "[2021-03-24T20:27:24.184+0000][90239][gc,heap,exit] Metaspace used 115409K, capacity 119541K, committed 120248K, reserved 1153024K" }, { "index": {} }, { "gc": "[2021-04-19T15:03:21.735+0000][84408][gc,heap,exit] class space used 14503K, capacity 15894K, committed 15948K, reserved 1048576K" }, { "index": {} }, { "gc": "[2021-04-19T15:03:21.735+0000][84408][gc,heap,exit] Metaspace used 107719K, capacity 111775K, committed 112724K, reserved 1146880K" }, { "index": {} }, { "gc": "[2021-04-27T16:16:34.699+0000][82460][gc,heap,exit] class space used 266K, capacity 367K, committed 384K, reserved 1048576K" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/376ff4b2b5f657481af78a778aaab57f.asciidoc000066400000000000000000000024521506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:154 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "nr": { "type": "integer" }, "state": { "type": "keyword" } } }, ) print(resp) resp1 = client.bulk( index="my-index", refresh=True, operations=[ { "index": {} }, { "nr": 1, "state": "started" }, { "index": {} }, { "nr": 2, "state": "stopped" }, { "index": {} }, { "nr": 3, "state": "N/A" }, { "index": {} }, { "nr": 4 } ], ) print(resp1) resp2 = client.search( index="my-index", filter_path="aggregations", aggs={ "my_top_metrics": { "top_metrics": { "metrics": { "field": "state", "missing": "N/A" }, "sort": { "nr": "desc" } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/377af0ea9b19c113f224d8150890b41b.asciidoc000066400000000000000000000042701506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:412 [source, python] ---- resp = client.search( query={ "bool": { "filter": [ { "term": { "event.outcome": "failure" } }, { "range": { "@timestamp": { "gte": "2021-02-01", "lt": "2021-02-04" } } }, { "term": { "service.name": { "value": "frontend-node" } } } ] } }, aggs={ "failure_p_value": { "significant_terms": { "field": "user_agent.version", "background_filter": { "bool": { "must_not": [ { "term": { "event.outcome": "failure" } } ], "filter": [ { "range": { "@timestamp": { "gte": "2021-02-01", "lt": "2021-02-04" } } }, { "term": { "service.name": { "value": "frontend-node" } } } ] } }, "p_value": { "background_is_superset": False, "normalize_above": 1000 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/378e55f78fa13578a1302bae8d479765.asciidoc000066400000000000000000000004651506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-query.asciidoc:134 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "color": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/37983daac3d9c8582583a507b3adb7f2.asciidoc000066400000000000000000000004311506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shutdown/apis/shutdown-delete.asciidoc:57 [source, python] ---- resp = client.shutdown.put_node( 
node_id="USpTGYaBSIKbgSUJR2Z9lg", type="restart", reason="Demonstrating how the node shutdown API works", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/37ae7c3e4d6d954487ec4185fe7d9ec8.asciidoc000066400000000000000000000010051506101734100266140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:130 [source, python] ---- resp = client.search( aggregations={ "forces": { "terms": { "field": "force" }, "aggregations": { "significant_crime_types": { "significant_terms": { "field": "crime_type" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/37b84f2ab7c2f6b4fe0e14cc7e018b1f.asciidoc000066400000000000000000000021321506101734100267060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/bi-directional-disaster-recovery.asciidoc:41 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "clusterB": { "mode": "proxy", "skip_unavailable": True, "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com", "proxy_socket_connections": 18, "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400" } } } }, ) print(resp) resp1 = client.cluster.put_settings( persistent={ "cluster": { "remote": { "clusterA": { "mode": "proxy", "skip_unavailable": True, "server_name": "clustera.es.region-a.gcp.elastic-cloud.com", "proxy_socket_connections": 18, "proxy_address": "clustera.es.region-a.gcp.elastic-cloud.com:9400" } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/37c73410bf13429279cbc61a413957d8.asciidoc000066400000000000000000000003021506101734100261730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:558 [source, python] ---- resp = client.cluster.stats( filter_path="indices.shards.total", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/37eaab0630976d3dee90a52011342883.asciidoc000066400000000000000000000007341506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stop-tokenfilter.asciidoc:106 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "whitespace", "filter": [ "stop" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/37f1f2e75ed95308ae436bbbb8d5645e.asciidoc000066400000000000000000000002701506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/start-trial.asciidoc:44 [source, python] ---- resp = client.license.post_start_trial( acknowledge=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3819d0a5c2eed635c88e9e7bf2e81584.asciidoc000066400000000000000000000004301506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/revert-snapshot.asciidoc:84 [source, python] ---- resp = client.ml.revert_model_snapshot( job_id="low_request_rate", snapshot_id="1637092688", delete_intervening_results=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/386eb7dcd3149db82605bf22c5d851bf.asciidoc000066400000000000000000000004101506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-api-key.asciidoc:373 [source, python] ---- resp = client.security.create_api_key( name="application-key-1", metadata={ "application": "my-application" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/388d3eda4f792d3fce044777739217e6.asciidoc000066400000000000000000000007301506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:442 [source, python] ---- resp = client.ml.evaluate_data_frame( index="animal_classification", evaluation={ "classification": { "actual_field": "animal_class", "predicted_field": "ml.animal_class_prediction", "metrics": { "multiclass_confusion_matrix": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/388ec2b038d3ad69378f4c2e5bc36dce.asciidoc000066400000000000000000000015621506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-field-masking-query.asciidoc:16 [source, python] ---- resp = client.search( query={ "span_near": { "clauses": [ { "span_term": { "text": "quick brown" } }, { "span_field_masking": { "query": { "span_term": { "text.stems": "fox" } }, "field": "text" } } ], "slop": 5, "in_order": False } }, highlight={ "require_field_match": False, "fields": { "*": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/38af4a55c1ea0f908dc7b06d680d2789.asciidoc000066400000000000000000000003251506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:507 [source, python] ---- resp = client.indices.create_data_stream( name="new-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/38b20fe981605e80a41517e9aa13134a.asciidoc000066400000000000000000000015501506101734100262430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/bucket-selector-aggregation.asciidoc:51 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "total_sales": { "sum": { "field": "price" } }, "sales_bucket_filter": { "bucket_selector": { "buckets_path": { "totalSales": "total_sales" }, "script": "params.totalSales > 200" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/38eed000de433b540116928681c520d3.asciidoc000066400000000000000000000003441506101734100261610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/preview-datafeed.asciidoc:116 [source, python] ---- resp = client.ml.preview_datafeed( datafeed_id="datafeed-high_sum_total_sales", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/38f7739f750f1411bccf511a0abaaea3.asciidoc000066400000000000000000000002451506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/field-caps.asciidoc:18 [source, python] ---- resp = client.field_caps( fields="rating", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/38ffa96674b5fd4042589af0ebb0437b.asciidoc000066400000000000000000000005541506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/configuring-ldap-realm.asciidoc:152 [source, python] ---- resp = client.security.put_role_mapping( name="basic_users", roles=[ "user" ], rules={ "field": { "groups": "cn=users,dc=example,dc=com" } }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3924ee252581ebb96ac0e60046125ae8.asciidoc000066400000000000000000000002721506101734100263310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
rest-api/security/get-users.asciidoc:68 [source, python] ---- resp = client.security.get_user( username="jacknich", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3951d7fcd7f849fa278daf342872125a.asciidoc000066400000000000000000000003131506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:378 [source, python] ---- resp = client.indices.analyze( index="analyze_sample", text="this is a test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/39760996f94ad34aaceaa16a5cc97993.asciidoc000066400000000000000000000003061506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shutdown/apis/shutdown-get.asciidoc:67 [source, python] ---- resp = client.shutdown.get_node( node_id="USpTGYaBSIKbgSUJR2Z9lg", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/397ab5f9ea0b69ae85038bb0b9915180.asciidoc000066400000000000000000000003241506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-dsl.asciidoc:523 [source, python] ---- resp = client.indices.data_streams_stats( name="datastream", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/397bdb40d0146102f1f4c6a35675e16a.asciidoc000066400000000000000000000021401506101734100263160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/recipes/stemming.asciidoc:11 [source, python] ---- resp = client.indices.create( index="index", settings={ "analysis": { "analyzer": { "english_exact": { "tokenizer": "standard", "filter": [ "lowercase" ] } } } }, mappings={ "properties": { "body": { "type": "text", "analyzer": "english", "fields": { "exact": { "type": "text", "analyzer": "english_exact" } } } } }, ) print(resp) resp1 = client.index( index="index", id="1", document={ "body": "Ski resort" }, ) print(resp1) resp2 = client.index( index="index", id="2", document={ "body": "A pair of skis" }, ) print(resp2) resp3 = client.indices.refresh( index="index", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/398389933901b572a06a752bc780af7c.asciidoc000066400000000000000000000007271506101734100262200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-anthropic.asciidoc:137 [source, python] ---- resp = client.inference.put( task_type="completion", inference_id="anthropic_completion", inference_config={ "service": "anthropic", "service_settings": { "api_key": "", "model_id": "" }, "task_settings": { "max_tokens": 1024 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/39963032d423e2f20f53c4621b6ca3c6.asciidoc000066400000000000000000000003241506101734100262420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/ngram-tokenizer.asciidoc:24 [source, python] ---- resp = client.indices.analyze( tokenizer="ngram", text="Quick Fox", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/39ce44333d28ed2b833722d3e3cb06f3.asciidoc000066400000000000000000000017511506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/bool-query.asciidoc:187 [source, python] ---- resp = client.search( include_named_queries_score=True, query={ "bool": { "should": [ { "match": { "name.first": { "query": "shay", "_name": "first" } } }, { "match": { "name.last": { "query": "banon", "_name": "last" } } } ], "filter": { "terms": { "name.last": [ "banon", "kimchy" ], "_name": "test" } } } }, ) print(resp) ---- 
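With include_named_queries_score=True, each hit reports which of the named clauses ("first", "last", "test") matched, along with their score contributions; a small sketch of reading that back from the response returned above:

[source, python]
----
for hit in resp["hits"]["hits"]:
    # matched_queries carries the _name values that matched this hit,
    # with per-query scores when include_named_queries_score is set.
    print(hit["_id"], hit.get("matched_queries"))
----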
python-elasticsearch-9.1.1/docs/examples/39d6f575c9458d9c941364dfd0493fa0.asciidoc000066400000000000000000000003331506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-calendar-event.asciidoc:118 [source, python] ---- resp = client.ml.get_calendar_events( calendar_id="planned-outages", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a12feb0de224bfaaf518d95b9f516ff.asciidoc000066400000000000000000000027611506101734100270020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/put-watch.asciidoc:126 [source, python] ---- resp = client.watcher.put_watch( id="my-watch", trigger={ "schedule": { "cron": "0 0/1 * * * ?" } }, input={ "search": { "request": { "indices": [ "logstash*" ], "body": { "query": { "bool": { "must": { "match": { "response": 404 } }, "filter": { "range": { "@timestamp": { "from": "{{ctx.trigger.scheduled_time}}||-5m", "to": "{{ctx.trigger.triggered_time}}" } } } } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 0 } } }, actions={ "email_admin": { "email": { "to": "admin@domain.host.com", "subject": "404 recently encountered" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a204b57072a104d9b50f3a9e064a8f6.asciidoc000066400000000000000000000005341506101734100263220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:620 [source, python] ---- resp = client.search( index=".ml-anomalies-custom-example", size=0, aggs={ "job_ids": { "terms": { "field": "job_id", "size": 100 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a2953fd81d65118a776c87a81530e15.asciidoc000066400000000000000000000006651506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:605 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "order": "score", "fields": { "comment": { "fragment_size": 150, "number_of_fragments": 3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a2f37f8f32b1aa6bcfb252b9e00f904.asciidoc000066400000000000000000000003771506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules.asciidoc:97 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "mode": "standard" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a3adae6dbb2c0316a7d98d0a6c1d4f8.asciidoc000066400000000000000000000020001506101734100267470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:342 [source, python] ---- resp = client.search( index="quantized-image-index", knn={ "field": "image-vector", "query_vector": [ 0.1, -2 ], "k": 15, "num_candidates": 100 }, fields=[ "title" ], rescore={ "window_size": 10, "query": { "rescore_query": { "script_score": { "query": { "match_all": {} }, "script": { "source": "cosineSimilarity(params.query_vector, 'image-vector') + 1.0", "params": { "query_vector": [ 0.1, -2 ] } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a3e6e2627cafa08e4402a0de95785cc.asciidoc000066400000000000000000000012031506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:207 [source, python] ---- resp = client.search( index="my-index-000001", query={ 
"match": { "message": "you know for search" } }, collapse={ "field": "user.id" }, rescore={ "window_size": 50, "query": { "rescore_query": { "match_phrase": { "message": "you know for search" } }, "query_weight": 0.3, "rescore_query_weight": 1.4 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a489743e49902df38e3368cae00717a.asciidoc000066400000000000000000000002611506101734100262700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/high-cpu-usage.asciidoc:47 [source, python] ---- resp = client.nodes.hot_threads() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a4953663a5a3809b692c27446e16b7f.asciidoc000066400000000000000000000006051506101734100262120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:206 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "amazon-bedrock-embeddings", "pipeline": "amazon_bedrock_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a5f2e2313614ea9693545edee22ac43.asciidoc000066400000000000000000000004011506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/delete-service-token.asciidoc:53 [source, python] ---- resp = client.security.delete_service_token( namespace="elastic", service="fleet-server", name="token42", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a6238835c7d9f51e6d91f92885fadeb.asciidoc000066400000000000000000000010371506101734100265320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:13 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "post_date": { "type": "date" }, "user": { "type": "keyword" }, "name": { "type": "keyword" }, "age": { "type": "integer" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3a64ae799cc03fadbb802794730c23da.asciidoc000066400000000000000000000010611506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-shape-query.asciidoc:86 [source, python] ---- resp = client.indices.create( index="example_points", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.index( index="example_points", id="1", refresh=True, document={ "name": "Wind & Wetter, Berlin, Germany", "location": [ 13.400544, 52.530286 ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/3aa0e2d25a51bf5f3f0bda7fd8403bf2.asciidoc000066400000000000000000000012451506101734100267530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stop-tokenfilter.asciidoc:183 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "default": { "tokenizer": "whitespace", "filter": [ "my_custom_stop_words_filter" ] } }, "filter": { "my_custom_stop_words_filter": { "type": "stop", "ignore_case": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ab8f65fcb55a0e3664c55749ec41efd.asciidoc000066400000000000000000000022731506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1407 [source, python] ---- resp = client.indices.create( index="persian_example", settings={ "analysis": { 
"char_filter": { "zero_width_spaces": { "type": "mapping", "mappings": [ "\\u200C=>\\u0020" ] } }, "filter": { "persian_stop": { "type": "stop", "stopwords": "_persian_" } }, "analyzer": { "rebuilt_persian": { "tokenizer": "standard", "char_filter": [ "zero_width_spaces" ], "filter": [ "lowercase", "decimal_digit", "arabic_normalization", "persian_normalization", "persian_stop", "persian_stem" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3abedc1d68fe1d20621157406b2b1de0.asciidoc000066400000000000000000000016211506101734100265250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc:359 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "filter": [ "my_custom_word_delimiter_filter" ] } }, "filter": { "my_custom_word_delimiter_filter": { "type": "word_delimiter", "type_table": [ "- => ALPHA" ], "split_on_case_change": False, "split_on_numerics": False, "stem_english_possessive": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ac075c5b5bbe648d40d06cce3061367.asciidoc000066400000000000000000000007321506101734100264640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:577 [source, python] ---- resp = client.render_search_template( source="{ \"query\": { \"bool\": { \"filter\": [ {{#year_scope}} { \"range\": { \"@timestamp\": { \"gte\": \"now-1y/d\", \"lt\": \"now/d\" } } }, {{/year_scope}} { \"term\": { \"user.id\": \"{{user_id}}\" }}]}}}", params={ "year_scope": False, "user_id": "kimchy" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ac8b5234e9d53859245cf8ab0094ca5.asciidoc000066400000000000000000000003031506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-job.asciidoc:74 [source, python] ---- resp = client.ml.delete_job( job_id="total-requests", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3af10fde8138d9d95df127d39d9a0ed2.asciidoc000066400000000000000000000004111506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/troubleshooting-shards-capacity.asciidoc:223 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.max_shards_per_node": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3afc6dacf90b42900ab571aad8a61d75.asciidoc000066400000000000000000000022111506101734100266730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1599 [source, python] ---- resp = client.indices.create( index="serbian_example", settings={ "analysis": { "filter": { "serbian_stop": { "type": "stop", "stopwords": "_serbian_" }, "serbian_keywords": { "type": "keyword_marker", "keywords": [ "пример" ] }, "serbian_stemmer": { "type": "stemmer", "language": "serbian" } }, "analyzer": { "rebuilt_serbian": { "tokenizer": "standard", "filter": [ "lowercase", "serbian_stop", "serbian_keywords", "serbian_stemmer", "serbian_normalization" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b0475515ee692a2d9850c2bd7cdb895.asciidoc000066400000000000000000000014251506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:648 [source, python] ---- resp = 
client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "unindexed_longs": { "match_mapping_type": "long", "mapping": { "type": "long", "index": False } } }, { "unindexed_doubles": { "match_mapping_type": "double", "mapping": { "type": "float", "index": False } } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b04cc894e6a47d57983484010feac0c.asciidoc000066400000000000000000000004131506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:869 [source, python] ---- resp = client.get( index="metricbeat-2016.05.30-1", id="1", ) print(resp) resp1 = client.get( index="metricbeat-2016.05.31-1", id="1", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/3b05128cba6852e79a905bcdd5a8ebc0.asciidoc000066400000000000000000000003361506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:374 [source, python] ---- resp = client.search( index="my-index-000001", size="surprise_me", error_trace=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b162509ed14eda44a9681cd1108fa39.asciidoc000066400000000000000000000013311506101734100264050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/phrase-suggest.asciidoc:80 [source, python] ---- resp = client.search( index="test", suggest={ "text": "noble prize", "simple_phrase": { "phrase": { "field": "title.trigram", "size": 1, "gram_size": 3, "direct_generator": [ { "field": "title.trigram", "suggest_mode": "always" } ], "highlight": { "pre_tag": "", "post_tag": "" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b18e9de638ff0b1c7a1f1f6bf1c24f3.asciidoc000066400000000000000000000003111506101734100267100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-app-privileges.asciidoc:94 [source, python] ---- resp = client.security.get_privileges( application="myapp", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b1ff884f3bab390ae357e622c0544a9.asciidoc000066400000000000000000000031031506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rrf.asciidoc:186 [source, python] ---- resp = client.indices.create( index="example-index", mappings={ "properties": { "text": { "type": "text" }, "vector": { "type": "dense_vector", "dims": 1, "index": True, "similarity": "l2_norm", "index_options": { "type": "hnsw" } }, "integer": { "type": "integer" } } }, ) print(resp) resp1 = client.index( index="example-index", id="1", document={ "text": "rrf", "vector": [ 5 ], "integer": 1 }, ) print(resp1) resp2 = client.index( index="example-index", id="2", document={ "text": "rrf rrf", "vector": [ 4 ], "integer": 2 }, ) print(resp2) resp3 = client.index( index="example-index", id="3", document={ "text": "rrf rrf rrf", "vector": [ 3 ], "integer": 1 }, ) print(resp3) resp4 = client.index( index="example-index", id="4", document={ "text": "rrf rrf rrf rrf", "integer": 2 }, ) print(resp4) resp5 = client.index( index="example-index", id="5", document={ "vector": [ 0 ], "integer": 1 }, ) print(resp5) resp6 = client.indices.refresh( index="example-index", ) print(resp6) ---- python-elasticsearch-9.1.1/docs/examples/3b40db1c5c6b36f087d7a09a4ce285c6.asciidoc000066400000000000000000000002501506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template.asciidoc:93 [source, python] ---- resp 
= client.indices.get_index_template() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b606631284877f9bca15051630995ad.asciidoc000066400000000000000000000010101506101734100261130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:441 [source, python] ---- resp = client.search( index="my_test_scores", query={ "term": { "grad_year": "2099" } }, sort=[ { "_script": { "type": "number", "script": { "source": "doc['math_score'].value + doc['verbal_score'].value" }, "order": "desc" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b64821fe9db73eb03860c60d775d7ff.asciidoc000066400000000000000000000011151506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/update-cross-cluster-api-key.asciidoc:197 [source, python] ---- resp = client.perform_request( "PUT", "/_security/cross_cluster/api_key/VuaCfGcBCdbkQm-e5aOx", headers={"Content-Type": "application/json"}, body={ "access": { "replication": [ { "names": [ "archive" ] } ] }, "metadata": { "application": "replication" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b8ab7027e0d616fb432acd8813e086c.asciidoc000066400000000000000000000005401506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:544 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3b9c54604535d97e8368d47148aecc6f.asciidoc000066400000000000000000000004411506101734100263640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/update-snapshot.asciidoc:55 [source, python] ---- resp = client.ml.update_model_snapshot( job_id="it_ops_new_logs", snapshot_id="1491852978", description="Snapshot 1", retain=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ba2896bcc724c27be8f0decf6f81813.asciidoc000066400000000000000000000006741506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // monitoring/indices.asciidoc:126 [source, python] ---- resp = client.indices.put_template( name="custom_monitoring", index_patterns=[ ".monitoring-beats-7-*", ".monitoring-es-7-*", ".monitoring-kibana-7-*", ".monitoring-logstash-7-*" ], order=1, settings={ "number_of_shards": 5, "number_of_replicas": 2 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3bb491db29deba25e1cc82bcaa1aa1a1.asciidoc000066400000000000000000000005201506101734100270670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:781 [source, python] ---- resp = client.reindex( source={ "index": "my-index-000001" }, dest={ "index": "my-new-index-000001" }, script={ "source": "ctx._source.tag = ctx._source.remove(\"flag\")" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3bb5951a9e1186af5d154f56ffc13502.asciidoc000066400000000000000000000015061506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/ignore-above.asciidoc:10 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "message": { "type": "keyword", "ignore_above": 20 } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "message": "Syntax error" }, ) print(resp1) resp2 = client.index( 
index="my-index-000001", id="2", document={ "message": "Syntax error with some long stacktrace" }, ) print(resp2) resp3 = client.search( index="my-index-000001", aggs={ "messages": { "terms": { "field": "message" } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/3bc4a3681e3ea9cb3de49f72085807d8.asciidoc000066400000000000000000000042651506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:321 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "linear": { "retrievers": [ { "retriever": { "standard": { "query": { "function_score": { "query": { "term": { "topic": "ai" } }, "functions": [ { "script_score": { "script": { "source": "doc['timestamp'].value.millis" } } } ], "boost_mode": "replace" } }, "sort": { "timestamp": { "order": "asc" } } } }, "weight": 2, "normalizer": "minmax" }, { "retriever": { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } }, "weight": 1.5 } ], "rank_window_size": 10 } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3bc872dbcdad8ff02cbaea39e7f38352.asciidoc000066400000000000000000000005051506101734100270560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:204 [source, python] ---- resp = client.indices.create( index="index_double", mappings={ "properties": { "field": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3bfa2362add163802fc2210cc2f37ba2.asciidoc000066400000000000000000000004611506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/clone-snapshot-api.asciidoc:16 [source, python] ---- resp = client.snapshot.clone( repository="my_repository", snapshot="source_snapshot", target_snapshot="target_snapshot", indices="index_a,index_b", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c04f75bcbb07125d51b21b9b2c9f6f0.asciidoc000066400000000000000000000020051506101734100265330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/index-field.asciidoc:11 [source, python] ---- resp = client.index( index="index_1", id="1", document={ "text": "Document in index 1" }, ) print(resp) resp1 = client.index( index="index_2", id="2", refresh=True, document={ "text": "Document in index 2" }, ) print(resp1) resp2 = client.search( index="index_1,index_2", query={ "terms": { "_index": [ "index_1", "index_2" ] } }, aggs={ "indices": { "terms": { "field": "_index", "size": 10 } } }, sort=[ { "_index": { "order": "asc" } } ], script_fields={ "index_name": { "script": { "lang": "painless", "source": "doc['_index']" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/3c09ca91057216125ed0e3856a91ff95.asciidoc000066400000000000000000000156311506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-ilm.asciidoc:91 [source, python] ---- resp = client.indices.put_index_template( name="datastream_template", index_patterns=[ "datastream*" ], data_stream={}, template={ "settings": { "index": { "mode": "time_series", "number_of_replicas": 0, "number_of_shards": 2 }, "index.lifecycle.name": "datastream_policy" }, "mappings": { "properties": { "@timestamp": { "type": "date" }, "kubernetes": { "properties": { "container": { "properties": { "cpu": { "properties": { "usage": { "properties": { 
"core": { "properties": { "ns": { "type": "long" } } }, "limit": { "properties": { "pct": { "type": "float" } } }, "nanocores": { "type": "long", "time_series_metric": "gauge" }, "node": { "properties": { "pct": { "type": "float" } } } } } } }, "memory": { "properties": { "available": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } }, "majorpagefaults": { "type": "long" }, "pagefaults": { "type": "long", "time_series_metric": "gauge" }, "rss": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } }, "usage": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" }, "limit": { "properties": { "pct": { "type": "float" } } }, "node": { "properties": { "pct": { "type": "float" } } } } }, "workingset": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } } } }, "name": { "type": "keyword" }, "start_time": { "type": "date" } } }, "host": { "type": "keyword", "time_series_dimension": True }, "namespace": { "type": "keyword", "time_series_dimension": True }, "node": { "type": "keyword", "time_series_dimension": True }, "pod": { "type": "keyword", "time_series_dimension": True } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c0d0c38e1c819a35a68cdba5ae8ccc4.asciidoc000066400000000000000000000010071506101734100267610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:262 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="alibabacloud_ai_search_embeddings", inference_config={ "service": "alibabacloud-ai-search", "service_settings": { "api_key": "", "service_id": "", "host": "", "workspace": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c345feb7c52fd54bcb5d5505fd8bc3b.asciidoc000066400000000000000000000006521506101734100267770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:1115 [source, python] ---- resp = client.ml.infer_trained_model( model_id="model2", docs=[ { "text_field": "" } ], inference_config={ "question_answering": { "question": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c36dc17359c6b6b6a40d04da9293fa7.asciidoc000066400000000000000000000014361506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:393 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movavg": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.unweightedAvg(values)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c5d5a5c34a62724942329658c688f5e.asciidoc000066400000000000000000000002751506101734100262250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:480 [source, python] ---- resp = client.ml.set_upgrade_mode( enabled=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c65cb58e131ef46f4dd081683b970ac.asciidoc000066400000000000000000000010701506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:125 [source, python] ---- resp = client.search( index="my_locations,my_geoshapes", query={ "bool": { "must": { 
"match_all": {} }, "filter": { "geo_distance": { "distance": "200km", "pin.location": { "lat": 40, "lon": -70 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c6abb9885cb1a997fcdd16f7fa4f673.asciidoc000066400000000000000000000003311506101734100267400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shrink-index.asciidoc:17 [source, python] ---- resp = client.indices.shrink( index="my-index-000001", target="shrunk-my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3c7621a81fa982b79f040a6d2611530e.asciidoc000066400000000000000000000016141506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/simulate-template.asciidoc:157 [source, python] ---- resp = client.cluster.put_component_template( name="ct1", template={ "settings": { "index.number_of_shards": 2 } }, ) print(resp) resp1 = client.cluster.put_component_template( name="ct2", template={ "settings": { "index.number_of_replicas": 0 }, "mappings": { "properties": { "@timestamp": { "type": "date" } } } }, ) print(resp1) resp2 = client.indices.put_index_template( name="final-template", index_patterns=[ "my-index-*" ], composed_of=[ "ct1", "ct2" ], priority=5, ) print(resp2) resp3 = client.indices.simulate_template( name="final-template", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/3cd2f7f9096a8e8180f27b6c30e71840.asciidoc000066400000000000000000000011651506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/filters-aggregation.asciidoc:76 [source, python] ---- resp = client.search( index="logs", size=0, aggs={ "messages": { "filters": { "filters": [ { "match": { "body": "error" } }, { "match": { "body": "warning" } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3cd93a48906069709b76420c66930c01.asciidoc000066400000000000000000000012671506101734100260510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stemmer-tokenfilter.asciidoc:264 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "lowercase", "my_stemmer" ] } }, "filter": { "my_stemmer": { "type": "stemmer", "language": "light_german" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d05fa99ba8e1f2c3f3dfe59e4ee60f6.asciidoc000066400000000000000000000004761506101734100270240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:24 [source, python] ---- resp = client.search( query={ "match": { "content": "kimchy" } }, highlight={ "fields": { "content": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d1a0e1dc5310544d032108ae0b3f099.asciidoc000066400000000000000000000003411506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-all-query.asciidoc:23 [source, python] ---- resp = client.search( query={ "match_all": { "boost": 1.2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d1ff6097e2359f927c88c2ccdb36252.asciidoc000066400000000000000000000002051506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/root.asciidoc:17 [source, python] ---- resp = client.info() print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/3d316bddd8503a6cc10566630a4155d3.asciidoc000066400000000000000000000003061506101734100263130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/get-settings.asciidoc:22 [source, python] ---- resp = client.perform_request( "GET", "/_watcher/settings", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d48d1ba49f680aac32177d653944623.asciidoc000066400000000000000000000003111506101734100262540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/actions.asciidoc:186 [source, python] ---- resp = client.watcher.ack_watch( watch_id="", action_id="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d6935e04de21ab2f103e5b61cfd7a5b.asciidoc000066400000000000000000000006751506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:647 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "rename": { "description": "Rename 'provider' to 'cloud.provider'", "field": "provider", "target_field": "cloud.provider", "ignore_failure": True } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d6a56dd3d93ece0e3da3fb66b4696d3.asciidoc000066400000000000000000000002221506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-usage.asciidoc:71 [source, python] ---- resp = client.nodes.usage() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3d82257167e8a14a7f474848b32da128.asciidoc000066400000000000000000000011571506101734100262120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/set.asciidoc:157 [source, python] ---- resp = client.ingest.put_pipeline( id="set_bar", description="sets the value of bar from the field foo", processors=[ { "set": { "field": "bar", "copy_from": "foo" } } ], ) print(resp) resp1 = client.ingest.simulate( id="set_bar", docs=[ { "_source": { "foo": [ "foo1", "foo2" ] } } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/3da35090e093c2d83c3b7d0d83bcb4ae.asciidoc000066400000000000000000000003761506101734100266250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // path-settings-overview.asciidoc:51 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.exclude._name": "target-node-name" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3db2b5a6424aa92ecab7a8640c38685a.asciidoc000066400000000000000000000002601506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete.asciidoc:186 [source, python] ---- resp = client.delete( index="my-index-000001", id="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3dd45f65e7bfe207e8d796118f25613c.asciidoc000066400000000000000000000003371506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/increase-cluster-shard-limit.asciidoc:147 [source, python] ---- resp = client.cluster.get_settings( flat_settings=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e121b43773cbb6dffa9b483c86a1f8d.asciidoc000066400000000000000000000014541506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-update-api-keys.asciidoc:87 [source, python] ---- resp = client.security.create_api_key( name="my-api-key", role_descriptors={ "role-a": { "cluster": [ "all" 
], "indices": [ { "names": [ "index-a*" ], "privileges": [ "read" ] } ] } }, metadata={ "application": "my-application", "environment": { "level": 1, "trusted": True, "tags": [ "dev", "staging" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e13c8a81f40a537eddc0b57633b45f8.asciidoc000066400000000000000000000003561506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:295 [source, python] ---- resp = client.indices.analyze( index="test_index", analyzer="my_analyzer", text="missing bicycles", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e1cb34fd6e510c79c2fff2126ac1c61.asciidoc000066400000000000000000000005611506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/meta-field.asciidoc:9 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "_meta": { "class": "MyApp::User", "version": { "min": "1.0", "max": "1.3" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e278e6c193b4c17dbdc70670e15d78c.asciidoc000066400000000000000000000007011506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:654 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "fields": { "comment": { "fragment_size": 150, "number_of_fragments": 3, "no_match_size": 150 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e33c1a4298ea6a0dec65a3ebf9ba973.asciidoc000066400000000000000000000004001506101734100267060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:339 [source, python] ---- resp = client.termvectors( index="my-index-000001", doc={ "fullname": "John Doe", "text": "test test test" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e4227250d49e81df48773f8ba803ea7.asciidoc000066400000000000000000000004401506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:134 [source, python] ---- resp = client.indices.put_mapping( index="my-data-stream", properties={ "message": { "type": "text" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e6db3d80439c2c176dbd1bb1296b6cf.asciidoc000066400000000000000000000004131506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:1010 [source, python] ---- resp = client.render_search_template( id="my-search-template", params={ "query_string": "hello world" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3e8ed6ae016eb823cb00d9035b8ac459.asciidoc000066400000000000000000000002451506101734100265510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search.asciidoc:16 [source, python] ---- resp = client.search( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ea33023474e77d73ac0540e3a02b0b2.asciidoc000066400000000000000000000011041506101734100262740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/mapping-roles.asciidoc:148 [source, python] ---- resp = client.security.put_role_mapping( name="basic_users", roles=[ "user" ], rules={ "any": [ { "field": { "dn": "cn=John Doe,cn=contractors,dc=example,dc=com" } }, { "field": { "groups": "cn=users,dc=example,dc=com" } } ] }, 
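        # In role-mapping rules, "any" matches when at least one of the listed clauses matches.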
enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ea4c971b3f47735dcc207ee2645fa03.asciidoc000066400000000000000000000004341506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:420 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "remove_index": { "index": "my-index-2099.05.06-000001" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3eb4cdd4a799a117ac1ff5f02b18a512.asciidoc000066400000000000000000000014271506101734100266250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:70 [source, python] ---- resp = client.indices.create( index="index", mappings={ "properties": { "query": { "type": "percolator" }, "body": { "type": "text" } } }, ) print(resp) resp1 = client.indices.update_aliases( actions=[ { "add": { "index": "index", "alias": "queries" } } ], ) print(resp1) resp2 = client.index( index="queries", id="1", refresh=True, document={ "query": { "match": { "body": "quick brown fox" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/3ec95ba697ff97ee2d1a721a393b5926.asciidoc000066400000000000000000000032051506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/analyzer.asciidoc:38 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase" ] }, "my_stop_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "english_stop" ] } }, "filter": { "english_stop": { "type": "stop", "stopwords": "_english_" } } } }, mappings={ "properties": { "title": { "type": "text", "analyzer": "my_analyzer", "search_analyzer": "my_stop_analyzer", "search_quote_analyzer": "my_analyzer" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "title": "The Quick Brown Fox" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "title": "A Quick Brown Fox" }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "query_string": { "query": "\"the quick brown fox\"" } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/3eca58ef7592b3a857ea3a9898de5997.asciidoc000066400000000000000000000013671506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geohashgrid-aggregation.asciidoc:99 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "zoomed-in": { "filter": { "geo_bounding_box": { "location": { "top_left": "POINT (4.9 52.4)", "bottom_right": "POINT (5.0 52.3)" } } }, "aggregations": { "zoom1": { "geohash_grid": { "field": "location", "precision": 8 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ed39eb60fbfafb70f7825b8d103bf17.asciidoc000066400000000000000000000010521506101734100267150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:75 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_distance": { "distance": "200km", "pin.location": { "lat": 40, "lon": -70 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ed79871d956bfb2d6d2721d7272520c.asciidoc000066400000000000000000000002671506101734100263550ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // rest-api/watcher/stats.asciidoc:118 [source, python] ---- resp = client.watcher.stats( metric="current_watches", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ee232bcb2281a12b33cd9764ee4081a.asciidoc000066400000000000000000000014101506101734100264470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geo-grid.asciidoc:174 [source, python] ---- resp = client.ingest.put_pipeline( id="geohex2shape", description="translate H3 cell to polygon with enriched fields", processors=[ { "geo_grid": { "description": "Ingest H3 cells like '811fbffffffffff' and create polygons", "field": "geocell", "tile_type": "geohex", "target_format": "wkt", "target_field": "shape", "parent_field": "parent", "children_field": "children", "non_children_field": "nonChildren", "precision_field": "precision" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f1fe5f5f99b98d0891f38003e10b636.asciidoc000066400000000000000000000005421506101734100263600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-async-query-api.asciidoc:23 [source, python] ---- resp = client.esql.async_query( query="\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", wait_for_completion_timeout="2s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f20459d358611793272f63dc596e889.asciidoc000066400000000000000000000010141506101734100260710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significanttext-aggregation.asciidoc:455 [source, python] ---- resp = client.search( index="news", query={ "match": { "custom_all": "elasticsearch" } }, aggs={ "tags": { "significant_text": { "field": "custom_all", "source_fields": [ "content", "title" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f292a5f67e20f91bf18f5c2412a07bf.asciidoc000066400000000000000000000007611506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/match-enrich-policy-type-ex.asciidoc:79 [source, python] ---- resp = client.ingest.put_pipeline( id="user_lookup", processors=[ { "enrich": { "description": "Add 'user' data based on 'email'", "policy_name": "users-policy", "field": "email", "target_field": "user", "max_matches": "1" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f2e5132e35b9e8b3203a4a0541cf0d4.asciidoc000066400000000000000000000007061506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-searchable-snapshot.asciidoc:103 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "cold": { "actions": { "searchable_snapshot": { "snapshot_repository": "backing_repo" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f30310cc6d0adae6b0f61705624a695.asciidoc000066400000000000000000000007131506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/create-snapshot-api.asciidoc:166 [source, python] ---- resp = client.snapshot.create( repository="my_repository", snapshot="snapshot_2", wait_for_completion=True, indices="index_1,index_2", ignore_unavailable=True, include_global_state=False, metadata={ "taken_by": "user123", "taken_because": "backup before upgrading" }, ) print(resp) ---- 
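A minimal follow-up sketch for the create-snapshot example above: because `wait_for_completion=True` is set, the response embeds the completed snapshot document, so its final state can be checked directly (the create call is repeated here only to keep the sketch self-contained).

[source, python]
----
resp = client.snapshot.create(
    repository="my_repository",
    snapshot="snapshot_2",
    wait_for_completion=True,
)
# "state" is a standard field of the snapshot info returned when
# wait_for_completion=True; "SUCCESS" means all shards were snapshotted.
state = resp["snapshot"]["state"]
if state != "SUCCESS":
    print(f"snapshot finished in state {state}")
----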
python-elasticsearch-9.1.1/docs/examples/3f5b5bee692e7d4b0992dc0a64e95a60.asciidoc000066400000000000000000000021271506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-inner-hits.asciidoc:442 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "my_join_field": { "type": "join", "relations": { "my_parent": "my_child" } } } }, ) print(resp) resp1 = client.index( index="test", id="1", refresh=True, document={ "number": 1, "my_join_field": "my_parent" }, ) print(resp1) resp2 = client.index( index="test", id="2", routing="1", refresh=True, document={ "number": 1, "my_join_field": { "name": "my_child", "parent": "1" } }, ) print(resp2) resp3 = client.search( index="test", query={ "has_child": { "type": "my_child", "query": { "match": { "number": 1 } }, "inner_hits": {} } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/3f60a892bed18151b7baac6cc712576a.asciidoc000066400000000000000000000010021506101734100265360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/kstem-tokenfilter.asciidoc:98 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "whitespace", "filter": [ "lowercase", "kstem" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f669878713a14dfba251c7ce74dd5c4.asciidoc000066400000000000000000000020021506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:640 [source, python] ---- resp = client.transform.preview_transform( source={ "index": "kibana_sample_data_ecommerce" }, pivot={ "group_by": { "customer_id": { "terms": { "field": "customer_id" } } }, "aggregations": { "last": { "top_metrics": { "metrics": [ { "field": "email" }, { "field": "customer_first_name.keyword" }, { "field": "customer_last_name.keyword" } ], "sort": { "order_date": "desc" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f8dc309b63fa0437898107b0d964217.asciidoc000066400000000000000000000002671506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/anomaly-detectors.asciidoc:287 [source, python] ---- resp = client.cat.ml_jobs( h="id,s,dpr,mb", v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f94ed945ae6416a0eb372c2db14d7e0.asciidoc000066400000000000000000000005031506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/recipes/stemming.asciidoc:116 [source, python] ---- resp = client.search( index="index", query={ "simple_query_string": { "fields": [ "body.exact" ], "query": "ski" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3f9dcf2aa42f3ecfb5ebfe48c1774103.asciidoc000066400000000000000000000005221506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:360 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "order_stats": { "stats": { "field": "taxful_total_price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3faec4ca15d8c2fbbd16781b1c8693d6.asciidoc000066400000000000000000000010411506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:473 [source, python] ---- resp = client.search( 
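    # query_vector_builder asks the referenced inference endpoint to embed
    # model_text at search time, instead of requiring a precomputed query_vector.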
index="mistral-embeddings", knn={ "field": "content_embedding", "query_vector_builder": { "text_embedding": { "model_id": "mistral_embeddings", "model_text": "Calculate fuel cost" } }, "k": 10, "num_candidates": 100 }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3faf5e2873de340acfe0a617017db784.asciidoc000066400000000000000000000004421506101734100265530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:283 [source, python] ---- resp = client.search( query={ "query_string": { "query": "(content:this OR name:this) AND (content:that OR name:that)" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fb1289c80a354da66693bfb25d7b412.asciidoc000066400000000000000000000007401506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:514 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="nightly-snapshots", schedule="0 30 2 * * ?", name="", repository="my_repository", config={ "include_global_state": False, "indices": "*" }, retention={ "expire_after": "30d", "min_count": 5, "max_count": 50 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fb2f41ad229a31ad3ae408cc50cbed5.asciidoc000066400000000000000000000004351506101734100267470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-api.asciidoc:234 [source, python] ---- resp = client.search( index="my-index-000001", timeout="2s", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fe0fb38f75d2a34fb1e6ac9bedbcdbc.asciidoc000066400000000000000000000003501506101734100272670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/ignored-field.asciidoc:21 [source, python] ---- resp = client.search( query={ "exists": { "field": "_ignored" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fe4264ace04405989141c43aadfff81.asciidoc000066400000000000000000000007101506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-roles.asciidoc:173 [source, python] ---- resp = client.security.put_role( name="cli_or_drivers_minimal", cluster=[ "cluster:monitor/main" ], indices=[ { "names": [ "test" ], "privileges": [ "read", "indices:admin/get" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fe5e6c0d5ea4586aa04f989ae54b72e.asciidoc000066400000000000000000000003161506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/verify-repo-api.asciidoc:31 [source, python] ---- resp = client.snapshot.verify_repository( name="my_repository", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fe79ed63195c5f8018648a5a6d645f6.asciidoc000066400000000000000000000006401506101734100263720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/routing-field.asciidoc:87 [source, python] ---- resp = client.indices.create( index="my-index-000002", mappings={ "_routing": { "required": True } }, ) print(resp) resp1 = client.index( index="my-index-000002", id="1", document={ "text": "No routing value provided" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/3fe9006f6c7faea162e43fb250f4da38.asciidoc000066400000000000000000000005021506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
ingest.asciidoc:483 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "set": { "field": "_source.my-long-field", "value": 10 } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3fecd5c6d0c172566da4a54320e1cff3.asciidoc000066400000000000000000000007271506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/dictionary-decompounder-tokenfilter.asciidoc:32 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "dictionary_decompounder", "word_list": [ "Donau", "dampf", "meer", "schiff" ] } ], text="Donaudampfschiff", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/3ffe9952786ab258bb6ab928b03148a2.asciidoc000066400000000000000000000004401506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/rare-terms-aggregation.asciidoc:92 [source, python] ---- resp = client.search( aggs={ "genres": { "rare_terms": { "field": "genre" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/400e89eb46ead8e9c9e40f123fd5e590.asciidoc000066400000000000000000000004211506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:434 [source, python] ---- resp = client.reindex( source={ "index": "source", "size": 100 }, dest={ "index": "dest", "routing": "=cat" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/402092585940953420404c2884a47e59.asciidoc000066400000000000000000000020601506101734100256230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:860 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } }, { "product": { "terms": { "field": "product" } } } ] }, "aggregations": { "the_avg": { "avg": { "field": "price" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4029af36cb3f8202549017f7378803b4.asciidoc000066400000000000000000000002341506101734100261200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/get-settings.asciidoc:16 [source, python] ---- resp = client.cluster.get_settings() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4053de806dfd9172167999ce098107c4.asciidoc000066400000000000000000000005461506101734100262300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/constant-score-query.asciidoc:12 [source, python] ---- resp = client.search( query={ "constant_score": { "filter": { "term": { "user.id": "kimchy" } }, "boost": 1.2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/405511f7c1f12cc0a227b4563fe7b2e2.asciidoc000066400000000000000000000003651506101734100263770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-async-query-get-api.asciidoc:17 [source, python] ---- resp = client.esql.async_query_get( id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/405ac843a9156d3cab374e199cac87fb.asciidoc000066400000000000000000000005751506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/create-connector-sync-job-api.asciidoc:21 [source, python] ---- resp = client.perform_request( "POST", "/_connector/_sync_job", 
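    # perform_request issues a raw HTTP call, so the JSON Content-Type
    # header must be supplied explicitly.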
headers={"Content-Type": "application/json"}, body={ "id": "connector-id", "job_type": "full", "trigger_method": "on_demand" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/405db6f3a01eceacfaa8b0ed3e4b3ac2.asciidoc000066400000000000000000000004131506101734100271570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-overall-buckets.asciidoc:181 [source, python] ---- resp = client.ml.get_overall_buckets( job_id="job-*", top_n=2, overall_score=50, start="1403532000000", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4061fd5ba7221ca85805ed14d59a6bc5.asciidoc000066400000000000000000000002541506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:271 [source, python] ---- resp = client.delete_script( id="calculate-score", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/406a0f1c1aac947bcee58f86b6d036c1.asciidoc000066400000000000000000000031431506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/actions.asciidoc:112 [source, python] ---- resp = client.watcher.put_watch( id="log_event_watch", trigger={ "schedule": { "interval": "5m" } }, input={ "search": { "request": { "indices": "log-events", "body": { "size": 0, "query": { "match": { "status": "error" } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 5 } } }, throttle_period="15m", actions={ "email_administrator": { "email": { "to": "sys.admino@host.domain", "subject": "Encountered {{ctx.payload.hits.total}} errors", "body": "Too many error in the system, see attached data", "attachments": { "attached_data": { "data": { "format": "json" } } }, "priority": "high" } }, "notify_pager": { "webhook": { "method": "POST", "host": "pager.service.domain", "port": 1234, "path": "/{{watch_id}}", "body": "Encountered {{ctx.payload.hits.total}} errors" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/408060f0c52300588a6dee774f4fd6a5.asciidoc000066400000000000000000000443271506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-dsl.asciidoc:218 [source, python] ---- resp = client.bulk( index="datastream", refresh=True, operations=[ { "create": {} }, { "@timestamp": "2022-06-21T15:49:00Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 91153, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 463314616 }, "usage": { "bytes": 307007078, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 585236 }, "rss": { "bytes": 102728 }, "pagefaults": 120901, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:45:50Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 124501, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 982546514 }, "usage": { "bytes": 360035574, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1339884 }, "rss": { "bytes": 381174 }, "pagefaults": 178473, 
"majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:44:50Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 38907, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 862723768 }, "usage": { "bytes": 379572388, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 431227 }, "rss": { "bytes": 386580 }, "pagefaults": 233166, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:44:40Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 86706, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 567160996 }, "usage": { "bytes": 103266017, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1724908 }, "rss": { "bytes": 105431 }, "pagefaults": 233166, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:44:00Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 150069, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 639054643 }, "usage": { "bytes": 265142477, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1786511 }, "rss": { "bytes": 189235 }, "pagefaults": 138172, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:42:40Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 82260, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 854735585 }, "usage": { "bytes": 309798052, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 924058 }, "rss": { "bytes": 110838 }, "pagefaults": 259073, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:42:10Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 153404, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 279586406 }, "usage": { "bytes": 214904955, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1047265 }, "rss": { "bytes": 91914 }, "pagefaults": 302252, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": 
"2022-06-21T15:40:20Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 125613, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 822782853 }, "usage": { "bytes": 100475044, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 2109932 }, "rss": { "bytes": 278446 }, "pagefaults": 74843, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:40:10Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 100046, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 567160996 }, "usage": { "bytes": 362826547, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1986724 }, "rss": { "bytes": 402801 }, "pagefaults": 296495, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:38:30Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 40018, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 1062428344 }, "usage": { "bytes": 265142477, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 2294743 }, "rss": { "bytes": 340623 }, "pagefaults": 224530, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40a42f005144cfed3dd1dcf2638e8211.asciidoc000066400000000000000000000004751506101734100264620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:774 [source, python] ---- resp = client.search_application.search( name="my_search_application", params={ "field": "price", "operator": "gte", "value": 500 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40b73b5c7ca144dc3f63f5b741f33d80.asciidoc000066400000000000000000000007451506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:157 [source, python] ---- resp = client.search( index="my-index-000001", query={ "constant_score": { "filter": { "percolate": { "field": "query", "document": { "message": "A new bonsai tree in the office" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40bd86e400d27e68b8f0ae580c29d32d.asciidoc000066400000000000000000000003571506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:279 [source, python] ---- resp = client.cluster.stats( human=True, filter_path="indices.mappings.total_deduplicated_mapping_size*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40c3e7bb1fdc125a1ab21bd7d7326694.asciidoc000066400000000000000000000014541506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
esql/multivalued-fields.asciidoc:145 [source, python] ---- resp = client.indices.create( index="mv", mappings={ "properties": { "b": { "type": "long" } } }, ) print(resp) resp1 = client.bulk( index="mv", refresh=True, operations=[ { "index": {} }, { "a": 1, "b": [ 2, 2, 1 ] }, { "index": {} }, { "a": 2, "b": [ 1, 1 ] } ], ) print(resp1) resp2 = client.esql.query( query="FROM mv | EVAL b=TO_STRING(b) | LIMIT 2", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/40d88d4f53343ef663c89ba488ab8001.asciidoc000066400000000000000000000007211506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:412 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "envelope", "coordinates": [ [ 1000, 100 ], [ 1001, 100 ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40d90d9dc6f4942bf92d88bfc5a34672.asciidoc000066400000000000000000000005171506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-bool-prefix-query.asciidoc:59 [source, python] ---- resp = client.search( query={ "match_bool_prefix": { "message": { "query": "quick brown f", "analyzer": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40f287bf733420bbab134b74c7d0ea5d.asciidoc000066400000000000000000000007471506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/ingest-vectors.asciidoc:68 [source, python] ---- resp = client.index( index="amazon-reviews", id="1", document={ "review_text": "This product is lifechanging! I'm telling all my friends about it.", "review_vector": [ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8 ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/40f97f70e8e743c6a6296c81b920aeb0.asciidoc000066400000000000000000000004251506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:314 [source, python] ---- resp = client.nodes.stats( human=True, filter_path="nodes.*.name,nodes.*.indices.mappings.total_estimated_overhead*,nodes.*.jvm.mem.heap_max*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4113c57384aa37c58d11579e20c00760.asciidoc000066400000000000000000000002731506101734100261050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:65 [source, python] ---- resp = client.get( index="my-index-000001", id="0", source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/41175d304e660da2931764f9a4418fd3.asciidoc000066400000000000000000000006061506101734100262010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-pipeline-api.asciidoc:94 [source, python] ---- resp = client.connector.update_pipeline( connector_id="my-connector", pipeline={ "extract_binary_content": True, "name": "my-connector-pipeline", "reduce_whitespace": True, "run_ml_inference": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/41195ef13af0465cdee1ae18f6c00fde.asciidoc000066400000000000000000000002151506101734100267040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-stop.asciidoc:52 [source, python] ---- resp = client.slm.stop() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/412f8238ab5182678f1d8f6383031b11.asciidoc000066400000000000000000000003101506101734100261070ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // indices/get-alias.asciidoc:16 [source, python] ---- resp = client.indices.get_alias( index="my-data-stream", name="my-alias", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/413fdcc7c437775a16bb55b81c2bbe2b.asciidoc000066400000000000000000000010241506101734100266210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1616 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", runtime={ "http.client.ip": { "type": "ip", "script": "\n String clientip=dissect('%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{status} %{size}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n " } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/415b46bc2b7a7b4dcf9a73ac67ea20e9.asciidoc000066400000000000000000000007341506101734100267160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/circle.asciidoc:99 [source, python] ---- resp = client.index( index="circles", id="2", pipeline="polygonize_circles", document={ "circle": { "type": "circle", "radius": "40m", "coordinates": [ 30, 10 ] } }, ) print(resp) resp1 = client.get( index="circles", id="2", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/416a3ba11232d3c078c1c31340cf356f.asciidoc000066400000000000000000000005401506101734100263000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:487 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "tags_schema": "styled", "fields": { "comment": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/41ad6077f9c1b8d8fefab6ea1660edcd.asciidoc000066400000000000000000000005301506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/format.asciidoc:13 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "date": { "type": "date", "format": "yyyy-MM-dd" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/41d24383d29b2808a65258a0a3256e96.asciidoc000066400000000000000000000005621506101734100261220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-jinaai.asciidoc:188 [source, python] ---- resp = client.indices.create( index="jinaai-index", mappings={ "properties": { "content": { "type": "semantic_text", "inference_id": "jinaai-embeddings" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/41dbd79f624b998d01c10921e9a35c4b.asciidoc000066400000000000000000000003501506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:296 [source, python] ---- resp = client.update( index="test", id="1", doc={ "name": "new_name" }, detect_noop=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/41fd33a293a575bd71a1fac7bcc8b47c.asciidoc000066400000000000000000000025341506101734100267110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search-application/apis/put-search-application.asciidoc:153 [source, python] ---- resp = client.search_application.put( name="my-app", search_application={ "indices": [ "index1", "index2" ], "template": { "script": { "source": { "query": { "query_string": { "query": "{{query_string}}", "default_field": "{{default_field}}" } } }, "params": { "query_string": "*", "default_field": "*" } }, 
"dictionary": { "properties": { "query_string": { "type": "string" }, "default_field": { "type": "string", "enum": [ "title", "description" ] }, "additionalProperties": False }, "required": [ "query_string" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4207219a892339e8f3abe0df8723dd27.asciidoc000066400000000000000000000003671506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/misc.asciidoc:136 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.metadata.administrator": "sysadmin@example.com" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/421e68e2b9789f0e8c08760d9e685d1c.asciidoc000066400000000000000000000011551506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/update-job.asciidoc:264 [source, python] ---- resp = client.ml.update_job( job_id="low_request_rate", description="An updated job", detectors={ "detector_index": 0, "description": "An updated detector description" }, groups=[ "kibana_sample_data", "kibana_sample_web_logs" ], model_plot_config={ "enabled": True }, renormalization_window_days=30, background_persist_interval="2h", model_snapshot_retention_days=7, results_retention_days=60, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/424fbf082cd4affb84439abfc916b597.asciidoc000066400000000000000000000004571506101734100266570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/downsample-data-stream.asciidoc:65 [source, python] ---- resp = client.indices.downsample( index="my-time-series-index", target_index="my-downsampled-time-series-index", config={ "fixed_interval": "1d" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/425eaaf9c7e3b1e77a3474fbab4183b4.asciidoc000066400000000000000000000004121506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/task-queue-backlog.asciidoc:36 [source, python] ---- resp = client.cat.thread_pool( v=True, s="t,n", h="type,name,node_name,active,queue,rejected,completed", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4275ecbe4aa68d43a8a7139866610a27.asciidoc000066400000000000000000000007221506101734100263420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/weighted-avg-aggregation.asciidoc:55 [source, python] ---- resp = client.search( index="exams", size=0, aggs={ "weighted_grade": { "weighted_avg": { "value": { "field": "grade" }, "weight": { "field": "weight" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/42ba7c1d13aee91fe6f0a8a42c30eb74.asciidoc000066400000000000000000000003171506101734100266770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:132 [source, python] ---- resp = client.indices.rollover( alias="my-data-stream", lazy=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/42bc7608bb675dd6238e2fecbb758d06.asciidoc000066400000000000000000000010461506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/geo-match-enrich-policy-type-ex.asciidoc:36 [source, python] ---- resp = client.index( index="postal_codes", id="1", refresh="wait_for", document={ "location": { "type": "envelope", "coordinates": [ [ 13, 53 ], [ 14, 52 ] ] }, "postal_code": "96598" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/42d02087f1c8ab0452ef373079a76843.asciidoc000066400000000000000000000003761506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/stop-analyzer.asciidoc:15 [source, python] ---- resp = client.indices.analyze( analyzer="stop", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/42deb4fe32afbe0f94185e256a79c447.asciidoc000066400000000000000000000012071506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/stop-analyzer.asciidoc:249 [source, python] ---- resp = client.indices.create( index="stop_example", settings={ "analysis": { "filter": { "english_stop": { "type": "stop", "stopwords": "_english_" } }, "analyzer": { "rebuilt_stop": { "tokenizer": "lowercase", "filter": [ "english_stop" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4301cb9d970ec65778f91ce1f438e0d5.asciidoc000066400000000000000000000007401506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:291 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "logs-nginx.access-prod", "alias": "logs" } }, { "add": { "index": "logs-my_app-default", "alias": "logs", "is_write_index": True } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/430705509f8367aef92be413f702520b.asciidoc000066400000000000000000000003711506101734100261740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-status-api.asciidoc:82 [source, python] ---- resp = client.connector.update_status( connector_id="my-connector", status="needs_configuration", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4310869b97d4224acaa6d66b1e196048.asciidoc000066400000000000000000000006251506101734100262620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:184 [source, python] ---- resp = client.search( index="my-index", query={ "sparse_vector": { "field": "content_embedding", "inference_id": "my-elser-endpoint", "query": "How to avoid muscle soreness after running?" 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4323f6d224847eccdce59c23e33fda0a.asciidoc000066400000000000000000000007611506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc:126 [source, python] ---- resp = client.indices.create( index="cjk_bigram_example", settings={ "analysis": { "analyzer": { "standard_cjk_bigram": { "tokenizer": "standard", "filter": [ "cjk_bigram" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/433cf45a23decdf3a096016ffaaf26ba.asciidoc000066400000000000000000000006051506101734100267600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:396 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "my-index-2099.05.06-000001", "alias": "my-alias", "search_routing": "1", "index_routing": "2" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4342ccf6cc24fd80bd3cd1f9a4c2ef8e.asciidoc000066400000000000000000000010031506101734100270470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:515 [source, python] ---- resp = client.clear_scroll( scroll_id=[ "DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", "DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/435e0d6a7d86e074d572d9671b7b9676.asciidoc000066400000000000000000000014761506101734100263170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:226 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "Polygon", "coordinates": [ [ [ 100, 0 ], [ 101, 0 ], [ 101, 1 ], [ 100, 1 ], [ 100, 0 ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/43854be6aae61edbea5f9ab988cb4ce5.asciidoc000066400000000000000000000003761506101734100270760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/using-ip-filtering.asciidoc:146 [source, python] ---- resp = client.cluster.put_settings( persistent={ "xpack.security.transport.filter.allow": "172.16.0.0/24" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/43d9e314431336a6f084cea76dfd6489.asciidoc000066400000000000000000000006241506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:254 [source, python] ---- resp = client.search( index="restaurants", retriever={ "knn": { "field": "vector", "query_vector": [ 10, 22, 77 ], "k": 10, "num_candidates": 10 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/43e86fbaeed068dcc981214338559b5a.asciidoc000066400000000000000000000003161506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/resolve-cluster.asciidoc:92 [source, python] ---- resp = client.indices.resolve_cluster( name="my-index-*,cluster*:my-index-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/43f77ddf1ed8106d4f47a12d39df8e3b.asciidoc000066400000000000000000000004341506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/range-enrich-policy-type-ex.asciidoc:113 [source, python] ---- resp = client.index( 
index="my-index-000001", id="my_id", pipeline="networks_lookup", document={ "ip": "10.100.34.1" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/43fe75fa9f3fca846598fdad58fd98cb.asciidoc000066400000000000000000000002151506101734100270410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/usage.asciidoc:44 [source, python] ---- resp = client.xpack.usage() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44198781d164a15be633d4469485a544.asciidoc000066400000000000000000000012341506101734100260530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:383 [source, python] ---- resp = client.search( index="my-index-bit-vectors", query={ "script_score": { "query": { "match_all": {} }, "script": { "source": "dotProduct(params.query_vector, 'my_dense_vector')", "params": { "query_vector": [ 8, 5, -15, 1, -7 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/441be98c597698bb2809372abf086c3e.asciidoc000066400000000000000000000004331506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/doc-count-field.asciidoc:80 [source, python] ---- resp = client.search( aggs={ "histogram_titles": { "terms": { "field": "my_text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/441f330f6872f995769db1ce2b9627e2.asciidoc000066400000000000000000000004161506101734100263060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:686 [source, python] ---- resp = client.search( stored_fields=[], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44231f7cdd5c3a21025861cdef31e355.asciidoc000066400000000000000000000003611506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:206 [source, python] ---- resp = client.indices.shrink( index="my-index", target="my-shrunken-index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4427517dcd8ec9997541150cdc11a0de.asciidoc000066400000000000000000000003331506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/snapshot/corrupt-repository.asciidoc:116 [source, python] ---- resp = client.snapshot.delete_repository( name="my-repo", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4435b654994b575ba181ea679871c78c.asciidoc000066400000000000000000000004121506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-api.asciidoc:26 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44385b61342e20ea05f254015b2b04d7.asciidoc000066400000000000000000000003641506101734100261510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-delete-roles.asciidoc:54 [source, python] ---- resp = client.security.bulk_delete_role( names=[ "my_admin_role", "my_user_role" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/443dd902f64b3217505c9595839c3b2d.asciidoc000066400000000000000000000004501506101734100262050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-multiple-indices.asciidoc:138 [source, python] ---- resp 
= client.search( indices_boost=[ { "my-alias": 1.4 }, { "my-index*": 1.3 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/443e8da9968f1c65f46a2a65a1e1e078.asciidoc000066400000000000000000000023651506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-tsds.asciidoc:147 [source, python] ---- resp = client.indices.put_index_template( name="my-weather-sensor-index-template", index_patterns=[ "metrics-weather_sensors-*" ], data_stream={}, template={ "settings": { "index.mode": "time_series", "index.lifecycle.name": "my-lifecycle-policy" }, "mappings": { "properties": { "sensor_id": { "type": "keyword", "time_series_dimension": True }, "location": { "type": "keyword", "time_series_dimension": True }, "temperature": { "type": "half_float", "time_series_metric": "gauge" }, "humidity": { "type": "half_float", "time_series_metric": "gauge" }, "@timestamp": { "type": "date" } } } }, priority=500, meta={ "description": "Template for my weather sensor data" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/445f8a6ef75fb43da52990b3a9063c78.asciidoc000066400000000000000000000005241506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1656 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "http.responses": "304" } }, fields=[ "http.client_ip", "timestamp", "http.verb" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/446e8fc8ccfb13bb5ec64e32a5676d18.asciidoc000066400000000000000000000004241506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/elision-tokenfilter.asciidoc:34 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "elision" ], text="j’examine près du wharf", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4479e8c63a04fa22207a6a8803eadcad.asciidoc000066400000000000000000000004121506101734100265420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/allocation_awareness.asciidoc:62 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.awareness.attributes": "rack_id" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44939997b0f2601f82a93585a879f65a.asciidoc000066400000000000000000000013211506101734100261540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/simplepatternsplit-tokenizer.asciidoc:40 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "simple_pattern_split", "pattern": "_" } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="an_underscored_phrase", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/4498b9d3b0c77e1b9ef6664ff5963ce2.asciidoc000066400000000000000000000003661506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shard-request-cache.asciidoc:61 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.requests.cache.enable": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44b8a236d7cfb31c43c6d066ae16d8cd.asciidoc000066400000000000000000000004151506101734100266310ustar00rootroot00000000000000// This file is autogenerated, 
DO NOT EDIT // search/profile.asciidoc:40 [source, python] ---- resp = client.search( index="my-index-000001", profile=True, query={ "match": { "message": "GET /search" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44bca3f17d403517af3616754dc795bb.asciidoc000066400000000000000000000013431506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/script-score-query.asciidoc:352 [source, python] ---- resp = client.explain( index="my-index-000001", id="0", query={ "script_score": { "query": { "match": { "message": "elasticsearch" } }, "script": { "source": "\n long count = doc['count'].value;\n double normalizedCount = count / 10;\n if (explanation != null) {\n explanation.set('normalized count = count / 10 = ' + count + ' / 10 = ' + normalizedCount);\n }\n return normalizedCount;\n " } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44da736ce0e1587c1e7c45eee606ead7.asciidoc000066400000000000000000000005351506101734100266500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:409 [source, python] ---- resp = client.update_by_query( index="my-index-000001", script={ "source": "ctx._source.count++", "lang": "painless" }, query={ "term": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44db41b8465af951e366da97ade63bc1.asciidoc000066400000000000000000000003171506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/apis/reload-analyzers.asciidoc:160 [source, python] ---- resp = client.indices.reload_search_analyzers( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44dd65d69267017fa2fb2cffadef40bb.asciidoc000066400000000000000000000010221506101734100267700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cardinality-aggregation.asciidoc:188 [source, python] ---- resp = client.search( index="sales", size="0", runtime_mappings={ "type_and_promoted": { "type": "keyword", "script": "emit(doc['type'].value + ' ' + doc['promoted'].value)" } }, aggs={ "type_promoted_count": { "cardinality": { "field": "type_and_promoted" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/44dfac5bc3131014e2c6bb1ebc76b33d.asciidoc000066400000000000000000000005071506101734100266650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:146 [source, python] ---- resp = client.indices.create( index="index_double", mappings={ "properties": { "field": { "type": "double" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/451b441c3311103d0d2bdbab771b26d2.asciidoc000066400000000000000000000006611506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:987 [source, python] ---- resp = client.put_script( id="my-search-template", script={ "lang": "mustache", "source": "\n {\n \"query\": {\n \"match\": {\n {{=( )=}}\n \"message\": \"(query_string)\"\n (={{ }}=)\n }\n }\n }\n " }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/451e7c29b2cf738cfc822f7c175bef56.asciidoc000066400000000000000000000007501506101734100266000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc:29 [source, python] ---- resp = client.indices.put_index_template( name="my-index-template", 
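    # The "template" body below attaches a data stream lifecycle with a
    # 7-day "data_retention" to every matching data stream.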
index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, template={ "lifecycle": { "data_retention": "7d" } }, meta={ "description": "Template with data stream lifecycle" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4527d9bb12cf738111a188af235d5d4c.asciidoc000066400000000000000000000010711506101734100264100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/grok-syntax.asciidoc:176 [source, python] ---- resp = client.search( index="my-index", runtime_mappings={ "http.clientip": { "type": "ip", "script": "\n String clientip=grok('%{COMMONAPACHELOG}').extract(doc[\"message\"].value)?.clientip;\n if (clientip != null) emit(clientip);\n " } }, query={ "match": { "http.clientip": "40.135.0.0" } }, fields=[ "http.clientip" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/45499ed1824d1d7cb59972580d2344cb.asciidoc000066400000000000000000000005421506101734100262760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/unsigned_long.asciidoc:68 [source, python] ---- resp = client.search( index="my_index", query={ "range": { "my_counter": { "gte": "9223372036854775808", "lte": "18446744073709551615" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/455029c3d66306ad5d48f6dbddaf7324.asciidoc000066400000000000000000000027361506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/sum-aggregation.asciidoc:140 [source, python] ---- resp = client.indices.create( index="metrics_index", mappings={ "properties": { "latency_histo": { "type": "histogram" } } }, ) print(resp) resp1 = client.index( index="metrics_index", id="1", refresh=True, document={ "network.name": "net-1", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp1) resp2 = client.index( index="metrics_index", id="2", refresh=True, document={ "network.name": "net-2", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 8, 17, 8, 7, 6 ] } }, ) print(resp2) resp3 = client.search( index="metrics_index", size="0", filter_path="aggregations", aggs={ "total_latency": { "sum": { "field": "latency_histo" } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/4553e0acb6336687d61eaecc73f517b7.asciidoc000066400000000000000000000014141506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/mapping-charfilter.asciidoc:109 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "char_filter": [ "my_mappings_char_filter" ] } }, "char_filter": { "my_mappings_char_filter": { "type": "mapping", "mappings": [ ":) => _happy_", ":( => _sad_" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/45813d971bfa890ffa2f51f3f480cce5.asciidoc000066400000000000000000000005251506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:355 [source, python] ---- resp = client.search( index="test_index", query={ "percolate": { "field": "query", "document": { "body": "Bycicles are missing" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/458b2228aed7464d915a5d73cb6b98f6.asciidoc000066400000000000000000000003001506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/snapshots.asciidoc:135 [source, python] ---- resp = 
client.cat.snapshots( repository="repo1", v=True, s="id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/45954b8aaedfed57012be8b6538b0a24.asciidoc000066400000000000000000000020731506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/chat-completion-inference.asciidoc:356 [source, python] ---- resp = client.inference.stream_inference( task_type="chat_completion", inference_id="openai-completion", messages=[ { "role": "user", "content": [ { "type": "text", "text": "What's the price of a scarf?" } ] } ], tools=[ { "type": "function", "function": { "name": "get_current_price", "description": "Get the current price of a item", "parameters": { "type": "object", "properties": { "item": { "id": "123" } } } } } ], tool_choice={ "type": "function", "function": { "name": "get_current_price" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/45b74f1904533fdb37a5a6f3c8f4ec9b.asciidoc000066400000000000000000000015311506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/edgengram-tokenizer.asciidoc:144 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "edge_ngram", "min_gram": 2, "max_gram": 10, "token_chars": [ "letter", "digit" ] } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="2 Quick Foxes.", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/45c6e54a9c9e08623af96752b4bde346.asciidoc000066400000000000000000000007351506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:213 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_distance": { "distance": "12km", "pin.location": "POINT (-70 40)" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/45ef5156dbd2d3fd4fd22b8d99f7aad4.asciidoc000066400000000000000000000003511506101734100270100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/restart-cluster.asciidoc:233 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.enable": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46064e81620162a23e75002a7eeb8b10.asciidoc000066400000000000000000000005221506101734100261500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/move-to-step.asciidoc:194 [source, python] ---- resp = client.ilm.move_to_step( index="my-index-000001", current_step={ "phase": "hot", "action": "complete", "name": "complete" }, next_step={ "phase": "warm" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46103fee3cd5f53dc75123def82d52ad.asciidoc000066400000000000000000000006331506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:293 [source, python] ---- resp = client.indices.put_index_template( name="my-data-stream-template", index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, template={ "settings": { "index.refresh_interval": "30s" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/464dffb6a6e24a860223d1c32b232f95.asciidoc000066400000000000000000000023201506101734100264030ustar00rootroot00000000000000// This 
file is autogenerated, DO NOT EDIT // analysis/tokenfilters/minhash-tokenfilter.asciidoc:134 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "filter": { "my_shingle_filter": { "type": "shingle", "min_shingle_size": 5, "max_shingle_size": 5, "output_unigrams": False }, "my_minhash_filter": { "type": "min_hash", "hash_count": 1, "bucket_count": 512, "hash_set_size": 1, "with_rotation": True } }, "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "my_shingle_filter", "my_minhash_filter" ] } } } }, mappings={ "properties": { "fingerprint": { "type": "text", "analyzer": "my_analyzer" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4659f639d71a54df571260ee5798dbb3.asciidoc000066400000000000000000000013711506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geotilegrid-aggregation.asciidoc:114 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "zoomed-in": { "filter": { "geo_bounding_box": { "location": { "top_left": "POINT (4.9 52.4)", "bottom_right": "POINT (5.0 52.3)" } } }, "aggregations": { "zoom1": { "geotile_grid": { "field": "location", "precision": 22 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46658f00edc4865dfe472a392374cd0f.asciidoc000066400000000000000000000003331506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template-v1.asciidoc:258 [source, python] ---- resp = client.indices.get_template( name="template_1", filter_path="*.version", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4670dd81a9865e07ae74ae8b0266e384.asciidoc000066400000000000000000000015151506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/t-test-aggregation.asciidoc:148 [source, python] ---- resp = client.search( index="node_upgrade", size=0, runtime_mappings={ "startup_time_before.adjusted": { "type": "long", "script": { "source": "emit(doc['startup_time_before'].value - params.adjustment)", "params": { "adjustment": 10 } } } }, aggs={ "startup_time_ttest": { "t_test": { "a": { "field": "startup_time_before.adjusted" }, "b": { "field": "startup_time_after" }, "type": "paired" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/467833bd44b35a89a7fe0d7df5f253f1.asciidoc000066400000000000000000000004041506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/pattern-analyzer.asciidoc:29 [source, python] ---- resp = client.indices.analyze( analyzer="pattern", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/468f7ec42cdd8287cdea3ec1cea4a514.asciidoc000066400000000000000000000006441506101734100270040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:338 [source, python] ---- resp = client.update( index="my-index-000001", id="1", script={ "source": "if (ctx._source.tags.contains(params['tag'])) { ctx._source.tags.remove(ctx._source.tags.indexOf(params['tag'])) }", "lang": "painless", "params": { "tag": "blue" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46a0eaaf5c881f1ba716d1812b36c724.asciidoc000066400000000000000000000014241506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/bi-directional-disaster-recovery.asciidoc:87 
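// Editor's note: the two auto-follow patterns below are intended to run on
// opposite clusters; each excludes indices that were themselves replicated
// from the other cluster, which is what prevents a replication loop.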
[source, python] ---- resp = client.ccr.put_auto_follow_pattern( name="logs-generic-default", remote_cluster="clusterB", leader_index_patterns=[ ".ds-logs-generic-default-20*" ], leader_index_exclusion_patterns="*-replicated_from_clustera", follow_index_pattern="{{leader_index}}-replicated_from_clusterb", ) print(resp) resp1 = client.ccr.put_auto_follow_pattern( name="logs-generic-default", remote_cluster="clusterA", leader_index_patterns=[ ".ds-logs-generic-default-20*" ], leader_index_exclusion_patterns="*-replicated_from_clusterb", follow_index_pattern="{{leader_index}}-replicated_from_clustera", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/46b1c1f6e0c86528be84c373eeb8d425.asciidoc000066400000000000000000000010301506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/update-license.asciidoc:145 [source, python] ---- resp = client.license.post( acknowledge=True, licenses=[ { "uid": "893361dc-9749-4997-93cb-802e3d7fa4xx", "type": "basic", "issue_date_in_millis": 1411948800000, "expiry_date_in_millis": 1914278399999, "max_nodes": 1, "issued_to": "issuedTo", "issuer": "issuer", "signature": "xx" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46b771a9932c3fa6057a7b2679c72ef0.asciidoc000066400000000000000000000003261506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex.asciidoc:143 [source, python] ---- resp = client.indices.get_migrate_reindex_status( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46c5c14f20118dcf519ff6ef21360209.asciidoc000066400000000000000000000010121506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-downsample.asciidoc:37 [source, python] ---- resp = client.ilm.put_lifecycle( name="datastream_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_docs": 1 }, "downsample": { "fixed_interval": "1h" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/46ce40227fa60aa6ba435f366b3a1f5f.asciidoc000066400000000000000000000007371506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:101 [source, python] ---- resp = client.ccr.pause_follow( index="kibana_sample_data_ecommerce2", ) print(resp) resp1 = client.indices.close( index="kibana_sample_data_ecommerce2", ) print(resp1) resp2 = client.ccr.unfollow( index="kibana_sample_data_ecommerce2", ) print(resp2) resp3 = client.indices.open( index="kibana_sample_data_ecommerce2", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/46ebd468c3f132a4978088964466c5cd.asciidoc000066400000000000000000000007601506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/apostrophe-tokenfilter.asciidoc:77 [source, python] ---- resp = client.indices.create( index="apostrophe_example", settings={ "analysis": { "analyzer": { "standard_apostrophe": { "tokenizer": "standard", "filter": [ "apostrophe" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/472ec8c57fec8457e31fe6dd7f6e3713.asciidoc000066400000000000000000000005331506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:448 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "title" ], "query": "this that thus", "minimum_should_match": 2 } }, ) 
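# With "minimum_should_match": 2, at least two of the three terms
# ("this", "that", "thus") must appear in the "title" field.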
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/473c8ddd4e4b7814a64e5fe40d9d6dca.asciidoc000066400000000000000000000003001506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/task-management.asciidoc:31 [source, python] ---- resp = client.tasks.cancel( task_id="2j8UKw1bRO283PMwDugNNg:5326", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4752f82fec8b46e5a4b3788b76e3041f.asciidoc000066400000000000000000000011011506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-migrate.asciidoc:84 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "migrate": { "enabled": False }, "allocate": { "include": { "rack_id": "one,two" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/47909e194d10743093f4a22c27a85925.asciidoc000066400000000000000000000013171506101734100260500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:198 [source, python] ---- resp = client.search( size=10000, query={ "match": { "user.id": "elkbee" } }, pit={ "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", "keep_alive": "1m" }, sort=[ { "@timestamp": { "order": "asc", "format": "strict_date_optional_time_nanos", "numeric_type": "date_nanos" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/47e6dfb5b09d954c9c0c33fda2b6c66d.asciidoc000066400000000000000000000006151506101734100267300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-user.asciidoc:167 [source, python] ---- resp = client.security.put_user( username="jacknich", password="l0ng-r4nd0m-p@ssw0rd", roles=[ "admin", "other_role1" ], full_name="Jack Nicholson", email="jacknich@example.com", metadata={ "intelligence": 7 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/47fde7874e15a37242993fd69c62063b.asciidoc000066400000000000000000000006721506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-rank-aggregation.asciidoc:29 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_ranks": { "percentile_ranks": { "field": "load_time", "values": [ 500, 600 ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/480e531db799c4c909afd8e2a73a8d0b.asciidoc000066400000000000000000000002311506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/forcemerge.asciidoc:199 [source, python] ---- resp = client.indices.forcemerge() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4818a1288ac24a56d6d6a4130ee70202.asciidoc000066400000000000000000000003041506101734100262340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:212 [source, python] ---- resp = client.get_script( id="my-search-template", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4824a823a830a2a5d990eacfd783ac22.asciidoc000066400000000000000000000011571506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:448 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", slice={ "id": 0, "max": 2 }, query={ "range": 
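        # Manual slice 0 of 2; the second request below issues the same
        # range filter (http.response.bytes below 2000000) for slice 1.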
{ "http.response.bytes": { "lt": 2000000 } } }, ) print(resp) resp1 = client.delete_by_query( index="my-index-000001", slice={ "id": 1, "max": 2 }, query={ "range": { "http.response.bytes": { "lt": 2000000 } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/48313f620c2871b6f4019b66be730109.asciidoc000066400000000000000000000015611506101734100261140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/filter-search-results.asciidoc:112 [source, python] ---- resp = client.search( index="shirts", query={ "bool": { "filter": { "term": { "brand": "gucci" } } } }, aggs={ "colors": { "terms": { "field": "color" } }, "color_red": { "filter": { "term": { "color": "red" } }, "aggs": { "models": { "terms": { "field": "model" } } } } }, post_filter={ "term": { "color": "red" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/483d669ec0768bc4e275a568c6164704.asciidoc000066400000000000000000000003041506101734100262170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/post-pause-follow.asciidoc:35 [source, python] ---- resp = client.ccr.pause_follow( index="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/484e24d1ed1a154ba9753e6090d38d78.asciidoc000066400000000000000000000005301506101734100263420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:140 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "point", "coordinates": [ -377.03653, 389.897676 ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/487f0e07fd83c05f9763e0795c525e2e.asciidoc000066400000000000000000000037701506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geoline-aggregation.asciidoc:13 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "my_location": { "type": "geo_point" }, "group": { "type": "keyword" }, "@timestamp": { "type": "date" } } }, ) print(resp) resp1 = client.bulk( index="test", refresh=True, operations=[ { "index": {} }, { "my_location": { "lat": 52.373184, "lon": 4.889187 }, "@timestamp": "2023-01-02T09:00:00Z" }, { "index": {} }, { "my_location": { "lat": 52.370159, "lon": 4.885057 }, "@timestamp": "2023-01-02T10:00:00Z" }, { "index": {} }, { "my_location": { "lat": 52.369219, "lon": 4.901618 }, "@timestamp": "2023-01-02T13:00:00Z" }, { "index": {} }, { "my_location": { "lat": 52.374081, "lon": 4.91235 }, "@timestamp": "2023-01-02T16:00:00Z" }, { "index": {} }, { "my_location": { "lat": 52.371667, "lon": 4.914722 }, "@timestamp": "2023-01-03T12:00:00Z" } ], ) print(resp1) resp2 = client.search( index="test", filter_path="aggregations", aggs={ "line": { "geo_line": { "point": { "field": "my_location" }, "sort": { "field": "@timestamp" } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/488f6df1df71972392b670ce557f7ff3.asciidoc000066400000000000000000000005011506101734100264530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template-v1.asciidoc:240 [source, python] ---- resp = client.indices.put_template( name="template_1", index_patterns=[ "my-index-*" ], order=0, settings={ "number_of_shards": 1 }, version=123, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/48d9697a14dfe131325521f48a7adc84.asciidoc000066400000000000000000000007541506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // search/search-your-data/search-template.asciidoc:867 [source, python] ---- resp = client.render_search_template( id="my-search-template", params={ "query_string": "My string", "text_fields": [ { "user_name": "John", "last": False }, { "user_name": "kimchy", "last": True } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/48de51de87a8ad9fd8b8db1ca25b85c1.asciidoc000066400000000000000000000007501506101734100270130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/similarity.asciidoc:542 [source, python] ---- resp = client.indices.close( index="index", ) print(resp) resp1 = client.indices.put_settings( index="index", settings={ "index": { "similarity": { "default": { "type": "boolean" } } } }, ) print(resp1) resp2 = client.indices.open( index="index", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/48e142e6c69014e0509d4c9251749d77.asciidoc000066400000000000000000000006701506101734100261400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-openai.asciidoc:161 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="openai-embeddings", inference_config={ "service": "openai", "service_settings": { "api_key": "", "model_id": "text-embedding-3-small", "dimensions": 128 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/49100a4f53c0ba345fadacdc4f2f86e4.asciidoc000066400000000000000000000003311506101734100266750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:74 [source, python] ---- resp = client.search( q="kimchy", filter_path="took,hits.hits._id,hits.hits._score", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4955bae30f265b9e436f82b015de6d7e.asciidoc000066400000000000000000000005731506101734100265140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-query.asciidoc:193 [source, python] ---- resp = client.search( index="my-index-000001", pretty=True, query={ "terms": { "color": { "index": "my-index-000001", "id": "2", "path": "color" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/496d35c89dc991a1509f7e8fb93ade45.asciidoc000066400000000000000000000023461506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:232 [source, python] ---- resp = client.indices.create( index="bengali_example", settings={ "analysis": { "filter": { "bengali_stop": { "type": "stop", "stopwords": "_bengali_" }, "bengali_keywords": { "type": "keyword_marker", "keywords": [ "উদাহরণ" ] }, "bengali_stemmer": { "type": "stemmer", "language": "bengali" } }, "analyzer": { "rebuilt_bengali": { "tokenizer": "standard", "filter": [ "lowercase", "decimal_digit", "bengali_keywords", "indic_normalization", "bengali_normalization", "bengali_stop", "bengali_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4980d6fcb369692b0b29ddc6767d4324.asciidoc000066400000000000000000000004071506101734100263610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/diagnose-unassigned-shards.asciidoc:198 [source, python] ---- resp = client.cluster.allocation_explain( index="my-index-000001", shard=0, primary=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4982c547be1ad9455ae836990aea92c5.asciidoc000066400000000000000000000006311506101734100264370ustar00rootroot00000000000000// This 
file is autogenerated, DO NOT EDIT // ml/trained-models/apis/start-trained-model-deployment.asciidoc:228 [source, python] ---- resp = client.ml.start_trained_model_deployment( model_id="my_model", deployment_id="my_model_for_search", adaptive_allocations={ "enabled": True, "min_number_of_allocations": 3, "max_number_of_allocations": 10 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4989cc97ce1c8fff634a10d343031bd0.asciidoc000066400000000000000000000003531506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/increase-data-node-capacity.asciidoc:104 [source, python] ---- resp = client.cat.shards( v=True, h="state,node", s="state", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/49a19615ebe2c013b8321152163478ab.asciidoc000066400000000000000000000013611506101734100261600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/fields.asciidoc:92 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "text": "quick brown fox" }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "text": "quick fox" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "script_score": { "query": { "match": { "text": "quick brown fox" } }, "script": { "source": "_termStats.termFreq().getAverage()" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/49c052a748c943180db78fee8e144239.asciidoc000066400000000000000000000003531506101734100262770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-api-key-cache.asciidoc:56 [source, python] ---- resp = client.security.clear_api_key_cache( ids="yVGMr3QByxdh1MSaicYx,YoiMaqREw0YVpjn40iMg", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/49c40b51da2469a6e00fea8fa6fbf56e.asciidoc000066400000000000000000000003761506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/task-management.asciidoc:11 [source, python] ---- resp = client.tasks.list( pretty=True, detailed=True, group_by="parents", human=True, actions="*data/read/esql", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/49cb3f48a0097bfc597c52fa51c6d379.asciidoc000066400000000000000000000004121506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:936 [source, python] ---- resp = client.security.put_role( name="saml-service-role", cluster=[ "manage_saml", "manage_token" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/49d87c2eb7314ed34221c5fb4f21dfcc.asciidoc000066400000000000000000000003401506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:263 [source, python] ---- resp = client.indices.analyze( index="analyze_sample", normalizer="my_normalizer", text="BaR", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/49e8773a34fcbf825de38426cff5509c.asciidoc000066400000000000000000000005501506101734100265260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/profile.asciidoc:1275 [source, python] ---- resp = client.search( index="my-knn-index", profile=True, knn={ "field": "my-vector", "query_vector": [ -5, 9, -12 ], "k": 3, "num_candidates": 100 }, ) print(resp) ---- 
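// Editor's note, not an autogenerated example: every snippet in this directory
// assumes an already constructed `client`. A minimal setup sketch, assuming a
// local deployment; the URL and API key below are placeholders, not values
// taken from this repository.

[source, python]
----
from elasticsearch import Elasticsearch

# Placeholder endpoint and credential; substitute your own deployment's values.
client = Elasticsearch(
    "https://localhost:9200",
    api_key="YOUR_API_KEY",
)

# Smoke test: prints basic cluster information if the connection works.
print(client.info())
----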
python-elasticsearch-9.1.1/docs/examples/49f4d2a461536d150e16b1e0a3148678.asciidoc000066400000000000000000000006201506101734100261670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/clearcache.asciidoc:116 [source, python] ---- resp = client.indices.clear_cache( index="my-index-000001", fielddata=True, ) print(resp) resp1 = client.indices.clear_cache( index="my-index-000001", query=True, ) print(resp1) resp2 = client.indices.clear_cache( index="my-index-000001", request=True, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/4a1951844bd39f26961bfc965f3432b1.asciidoc000066400000000000000000000004161506101734100262700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-get.asciidoc:144 [source, python] ---- resp = client.mget( index="my-index-000001", docs=[ { "_id": "1" }, { "_id": "2" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4a2080ae55d931eb0643cc3eb91ec524.asciidoc000066400000000000000000000020461506101734100264610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/multi-fields.asciidoc:82 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "text": { "type": "text", "fields": { "english": { "type": "text", "analyzer": "english" } } } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "text": "quick brown fox" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "text": "quick brown foxes" }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "multi_match": { "query": "quick brown foxes", "fields": [ "text", "text.english" ], "type": "most_fields" } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/4a4b8a406681584a91c0e614c1fa4344.asciidoc000066400000000000000000000022261506101734100262460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-api-keys.asciidoc:134 [source, python] ---- resp = client.security.create_api_key( name="my-api-key", expiration="1d", role_descriptors={ "role-a": { "cluster": [ "all" ], "indices": [ { "names": [ "index-a*" ], "privileges": [ "read" ] } ] }, "role-b": { "cluster": [ "all" ], "indices": [ { "names": [ "index-b*" ], "privileges": [ "all" ] } ] } }, metadata={ "application": "my-application", "environment": { "level": 1, "trusted": True, "tags": [ "dev", "staging" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4a72c68b96f44e80463084dfc0449d51.asciidoc000066400000000000000000000010461506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:287 [source, python] ---- resp = client.search( index="my-index-000001", runtime_mappings={ "day_of_week": { "type": "keyword", "script": { "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" } } }, aggs={ "day_of_week": { "terms": { "field": "day_of_week" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4a7510a9c0468303658383c00796dad2.asciidoc000066400000000000000000000007751506101734100261230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/ignore-malformed.asciidoc:70 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.mapping.ignore_malformed": True }, mappings={ "properties": { "number_one": { "type": "byte" }, "number_two": { "type": "integer", 
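                # Opts back out of the index-level
                # "index.mapping.ignore_malformed": True set above, so
                # malformed values in this field are rejected.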
"ignore_malformed": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4aa81a694266fb634904224d14cd9a87.asciidoc000066400000000000000000000005121506101734100262600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:668 [source, python] ---- resp = client.search( index="my_queries2", query={ "percolate": { "field": "query", "document": { "my_field": "wxyz" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ae494d1e62231e832fc0436b04e2014.asciidoc000066400000000000000000000007351506101734100262400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/validate.asciidoc:122 [source, python] ---- resp = client.indices.validate_query( index="my-index-000001", query={ "bool": { "must": { "query_string": { "query": "*:*" } }, "filter": { "term": { "user.id": "kimchy" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4af15c4f26ddefb9c350e7a246a66a15.asciidoc000066400000000000000000000013641506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:362 [source, python] ---- resp = client.search( index="node", filter_path="aggregations", aggs={ "ip": { "terms": { "field": "ip", "order": { "tm.m": "desc" } }, "aggs": { "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "date": "desc" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4b1044259a6d777d87529eae25675005.asciidoc000066400000000000000000000006761506101734100261420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:450 [source, python] ---- resp = client.ingest.put_pipeline( id="set-foo", description="sets foo", processors=[ { "set": { "field": "foo", "value": "bar" } } ], ) print(resp) resp1 = client.update_by_query( index="my-index-000001", pipeline="set-foo", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/4b3a49710fafa35d6d41a8ec12434515.asciidoc000066400000000000000000000012771506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:467 [source, python] ---- resp = client.search( index="my-index-000001", query={ "percolate": { "field": "query", "documents": [ { "message": "bonsai tree" }, { "message": "new tree" }, { "message": "the office" }, { "message": "office tree" } ] } }, highlight={ "fields": { "message": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4b5110a21676cc0e26e050a4b4552235.asciidoc000066400000000000000000000003011506101734100261320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/get-synonyms-set.asciidoc:81 [source, python] ---- resp = client.synonyms.get_synonym( id="my-synonyms-set", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4b91ad7c9b44e07db4a4e81390f19ad3.asciidoc000066400000000000000000000004111506101734100265510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/stream-inference.asciidoc:92 [source, python] ---- resp = client.inference.stream_inference( task_type="completion", inference_id="openai-completion", input="What is Elastic?", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ba86373e13e106de044f190343be328.asciidoc000066400000000000000000000017621506101734100262520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:365 
[source, python] ---- resp = client.search( aggs={ "countries": { "terms": { "field": "artist.country", "order": [ { "rock>playback_stats.avg": "desc" }, { "_count": "desc" } ] }, "aggs": { "rock": { "filter": { "term": { "genre": "rock" } }, "aggs": { "playback_stats": { "stats": { "field": "play_count" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bb4a64cf04e3feb133b0221d29beaa9.asciidoc000066400000000000000000000004321506101734100266650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:127 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="my_snapshot_2099.05.06", indices="my-index,logs-my_app-default", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bb7bcfebca682fb9c9e3e47bfd5ef6f.asciidoc000066400000000000000000000016301506101734100273200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:821 [source, python] ---- resp = client.search( size=0, track_total_hits=False, aggs={ "my_buckets": { "composite": { "sources": [ { "user_name": { "terms": { "field": "user_name" } } }, { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bba59cf745ac7b996bf90308bc26957.asciidoc000066400000000000000000000007601506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:349 [source, python] ---- resp = client.search( index="file-path-test", query={ "bool": { "must": { "match": { "file_path": "16" } }, "filter": { "term": { "file_path.tree": "/User/alice" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bc4db44b8c74610b73f21a421099a13.asciidoc000066400000000000000000000003121506101734100263100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:194 [source, python] ---- resp = client.security.invalidate_token( realm_name="saml1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bc744b0f33b322741a8caf6d8d7d765.asciidoc000066400000000000000000000005661506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:594 [source, python] ---- resp = client.index( index="my-index-000001", id="1", op_type="create", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bd42e31ac4a5cf237777f1a0e97aba8.asciidoc000066400000000000000000000003131506101734100266250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:286 [source, python] ---- resp = client.transform.start_transform( transform_id="suspicious_client_ips", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4be07b34db282044c88d5021c7ea08ee.asciidoc000066400000000000000000000014661506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:17 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 3 }, "my_text": { "type": "keyword" } } }, ) print(resp) resp1 = client.index( index="my-index", id="1", document={ "my_text": "text1", "my_vector": 
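        # A 3-element vector, matching "dims": 3 in the mapping above.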
[ 0.5, 10, 6 ] }, ) print(resp1) resp2 = client.index( index="my-index", id="2", document={ "my_text": "text2", "my_vector": [ -0.5, 10, 10 ] }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/4be20da16d2b58216e8b307218c7bf3a.asciidoc000066400000000000000000000012301506101734100264530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:188 [source, python] ---- resp = client.indices.put_index_template( name="my-data-stream-template", index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, template={ "mappings": { "properties": { "host": { "properties": { "ip": { "type": "ip", "ignore_malformed": True } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bef98a2dac575a50ee0783c2269f1db.asciidoc000066400000000000000000000006761506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:498 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "text_embedding": { "type": "dense_vector", "dims": 384, "index_options": { "type": "flat" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bf6bb703a52267379ae2b1e1308cf8b.asciidoc000066400000000000000000000010441506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/script-query.asciidoc:156 [source, python] ---- resp = client.search( query={ "bool": { "filter": { "script": { "script": { "source": "doc['num1'].value > params.param1", "lang": "painless", "params": { "param1": 5 } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4bfcb2861f1d572bd0d66acd66deab0b.asciidoc000066400000000000000000000004411506101734100270400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/update-datafeed.asciidoc:166 [source, python] ---- resp = client.ml.update_datafeed( datafeed_id="datafeed-test-job", query={ "term": { "geo.src": "US" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c174e228b6b74497b73ef2be80de7ad.asciidoc000066400000000000000000000002641506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/get-trained-models.asciidoc:1467 [source, python] ---- resp = client.ml.get_trained_models() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c3db8987d7b2d3d3df78ff1e71e7ede.asciidoc000066400000000000000000000004211506101734100270250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-query.asciidoc:22 [source, python] ---- resp = client.search( query={ "match": { "message": { "query": "this is a test" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c5f0d7af287618062bb627b44ccb23e.asciidoc000066400000000000000000000003251506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:197 [source, python] ---- resp = client.indices.forcemerge( index="my-index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c712bd5637892a11f16b8975a0a98ed.asciidoc000066400000000000000000000002631506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/dataframeanalytics.asciidoc:137 [source, python] ---- resp = client.cat.ml_data_frame_analytics( v=True, ) print(resp) ---- 
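// Editor's note, not an autogenerated example: every blocking call in these
// files has an async counterpart on `AsyncElasticsearch` (installed with the
// `elasticsearch[async]` extra). A minimal sketch reusing the
// `cat.ml_data_frame_analytics` call from the file above; the endpoint and
// API key are placeholders.

[source, python]
----
import asyncio

from elasticsearch import AsyncElasticsearch  # requires the aiohttp-based async extra

async def main():
    # Placeholder connection details, as in the synchronous examples.
    client = AsyncElasticsearch(
        "https://localhost:9200",
        api_key="YOUR_API_KEY",
    )
    try:
        # Same parameters as the synchronous call, awaited instead.
        resp = await client.cat.ml_data_frame_analytics(v=True)
        print(resp)
    finally:
        await client.close()

asyncio.run(main())
----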
python-elasticsearch-9.1.1/docs/examples/4c777b8360ef6c7671ae2e3803c0b0f6.asciidoc000066400000000000000000000017341506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/tophits-aggregation.asciidoc:52 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "top_tags": { "terms": { "field": "type", "size": 3 }, "aggs": { "top_sales_hits": { "top_hits": { "sort": [ { "date": { "order": "desc" } } ], "_source": { "includes": [ "date", "price" ] }, "size": 1 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c77d12039fe2445c9251e33979071ac.asciidoc000066400000000000000000000007721506101734100262120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/categorize-text-aggregation.asciidoc:282 [source, python] ---- resp = client.search( index="log-messages", filter_path="aggregations", aggs={ "categories": { "categorize_text": { "field": "message", "categorization_filters": [ "\\w+\\_\\d{3}" ], "similarity_threshold": 11 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c803b088c1915a7b0634d5cafabe606.asciidoc000066400000000000000000000006201506101734100264610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/ipprefix-aggregation.asciidoc:219 [source, python] ---- resp = client.search( index="network-traffic", size=0, aggs={ "ipv4-subnets": { "ip_prefix": { "field": "ipv4", "prefix_length": 24, "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c9350ed09b28f00e297ebe73c3b95a2.asciidoc000066400000000000000000000007141506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elasticsearch.asciidoc:236 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="my-msmarco-minilm-model", inference_config={ "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, "model_id": "msmarco-MiniLM-L12-cos-v5" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4c95d54b32df4dc49e9762b6c1ae2c05.asciidoc000066400000000000000000000010011506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/text.asciidoc:368 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "tag": { "type": "text", "fielddata": True, "fielddata_frequency_filter": { "min": 0.001, "max": 0.1, "min_segment_size": 500 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ca15672fc5ab1d80a127d086b6d2837.asciidoc000066400000000000000000000002511506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/allocation-explain.asciidoc:457 [source, python] ---- resp = client.cluster.allocation_explain() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ca5bc2c2b2f64d15b9c16370ae97a39.asciidoc000066400000000000000000000010421506101734100265440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geohashgrid-aggregation.asciidoc:212 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "tiles-in-bounds": { "geohash_grid": { "field": "location", "precision": 8, "bounds": { "top_left": "POINT (4.21875 53.4375)", "bottom_right": "POINT (5.625 52.03125)" } } } }, ) print(resp) ---- 
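// Editor's note, not an autogenerated example: the `resp` objects printed in
// these examples are API response wrappers, not plain dicts; they support
// mapping-style access plus `.body` and `.meta`. A short inspection sketch
// reusing the `cluster.allocation_explain` call from the file above;
// connection details are placeholders as before.

[source, python]
----
from elasticsearch import ApiError, Elasticsearch

client = Elasticsearch("https://localhost:9200", api_key="YOUR_API_KEY")

try:
    resp = client.cluster.allocation_explain()
    print(resp.meta.status)  # HTTP status code from the transport metadata
    print(resp.body)         # deserialized response body as a plain dict
except ApiError as err:
    # Raised for error responses, e.g. when the cluster has no unassigned
    # shard for this API to explain.
    print(f"request failed: {err}")
----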
python-elasticsearch-9.1.1/docs/examples/4cb44556b8c699f43489b17b42ddd475.asciidoc000066400000000000000000000007571506101734100263750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-get.asciidoc:222 [source, python] ---- resp = client.mget( docs=[ { "_index": "test", "_id": "1", "stored_fields": [ "field1", "field2" ] }, { "_index": "test", "_id": "2", "stored_fields": [ "field3", "field4" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4cd40113e0fc90c37976f28d7e4a2327.asciidoc000066400000000000000000000025301506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/normalizer.asciidoc:18 [source, python] ---- resp = client.indices.create( index="index", settings={ "analysis": { "normalizer": { "my_normalizer": { "type": "custom", "char_filter": [], "filter": [ "lowercase", "asciifolding" ] } } } }, mappings={ "properties": { "foo": { "type": "keyword", "normalizer": "my_normalizer" } } }, ) print(resp) resp1 = client.index( index="index", id="1", document={ "foo": "BÀR" }, ) print(resp1) resp2 = client.index( index="index", id="2", document={ "foo": "bar" }, ) print(resp2) resp3 = client.index( index="index", id="3", document={ "foo": "baz" }, ) print(resp3) resp4 = client.indices.refresh( index="index", ) print(resp4) resp5 = client.search( index="index", query={ "term": { "foo": "BAR" } }, ) print(resp5) resp6 = client.search( index="index", query={ "match": { "foo": "BAR" } }, ) print(resp6) ---- python-elasticsearch-9.1.1/docs/examples/4cdbd53f08df4bf66e2a47c0f1fcb3f8.asciidoc000066400000000000000000000003161506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/clearcache.asciidoc:136 [source, python] ---- resp = client.indices.clear_cache( index="my-index-000001", fields="foo,bar", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4cdcc3fde5cea165a3a7567962b9bd61.asciidoc000066400000000000000000000031411506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/put-synonyms-set.asciidoc:131 [source, python] ---- resp = client.synonyms.put_synonym( id="my-synonyms-set", synonyms_set=[ { "id": "test-1", "synonyms": "hello, hi" } ], ) print(resp) resp1 = client.indices.create( index="test-index", settings={ "analysis": { "filter": { "synonyms_filter": { "type": "synonym_graph", "synonyms_set": "my-synonyms-set", "updateable": True } }, "analyzer": { "my_index_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase" ] }, "my_search_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "synonyms_filter" ] } } } }, mappings={ "properties": { "title": { "type": "text", "analyzer": "my_index_analyzer", "search_analyzer": "my_search_analyzer" } } }, ) print(resp1) resp2 = client.synonyms.put_synonym( id="my-synonyms-set", synonyms_set=[ { "id": "test-1", "synonyms": "hello, hi, howdy" } ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/4ce4563e207233c48ffe849728052dca.asciidoc000066400000000000000000000002551506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:412 [source, python] ---- resp = client.indices.rollover( alias="logs-my_app-default", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4d21725453955582ff12b4a1104aa7b6.asciidoc000066400000000000000000000005241506101734100261630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
ml/anomaly-detection/apis/update-filter.asciidoc:50 [source, python] ---- resp = client.ml.update_filter( filter_id="safe_domains", description="Updated list of domains", add_items=[ "*.myorg.com" ], remove_items=[ "wikipedia.org" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4d2e6eb7fea407deeb7a859c267fda62.asciidoc000066400000000000000000000014661506101734100270220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/put-job.asciidoc:260 [source, python] ---- resp = client.rollup.put_job( id="sensor", index_pattern="sensor-*", rollup_index="sensor_rollup", cron="*/30 * * * * ?", page_size=1000, groups={ "date_histogram": { "field": "timestamp", "fixed_interval": "1h", "delay": "7d" }, "terms": { "fields": [ "node" ] } }, metrics=[ { "field": "temperature", "metrics": [ "min", "max", "sum" ] }, { "field": "voltage", "metrics": [ "avg" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4d46e2160784bdf7cce948e9f0d31fc8.asciidoc000066400000000000000000000016511506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc:410 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "filter": [ "my_custom_word_delimiter_graph_filter" ] } }, "filter": { "my_custom_word_delimiter_graph_filter": { "type": "word_delimiter_graph", "type_table": [ "- => ALPHA" ], "split_on_case_change": False, "split_on_numerics": False, "stem_english_possessive": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4d7c0b52d3c0a084157428624c543c90.asciidoc000066400000000000000000000002251506101734100261610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/common/apis/get-ml-info.asciidoc:44 [source, python] ---- resp = client.ml.info() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4da0cb8693e9ceceee2ba3b558014bbf.asciidoc000066400000000000000000000015651506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-sharepoint-online.asciidoc:1088 [source, python] ---- resp = client.update_by_query( index="INDEX_NAME", conflicts="proceed", query={ "bool": { "filter": [ { "match": { "object_type": "drive_item" } }, { "exists": { "field": "file" } }, { "range": { "lastModifiedDateTime": { "lte": "now-180d" } } } ] } }, script={ "source": "ctx._source.body = ''", "lang": "painless" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4dc151eebefd484a28aed1a175743364.asciidoc000066400000000000000000000010001506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:93 [source, python] ---- resp = client.ingest.put_pipeline( id="openai_embeddings_pipeline", processors=[ { "inference": { "model_id": "openai_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4de4bb55bbc0a76c75d256f245a3ee3f.asciidoc000066400000000000000000000005561506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/elastic-infer-service.asciidoc:100 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="elser-model-eis", inference_config={ "service": "elastic", "service_settings": { 
"model_name": "elser" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ded8ad815ac0e83b1c21a6c18fd0763.asciidoc000066400000000000000000000003341506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/ecommerce-tutorial.asciidoc:401 [source, python] ---- resp = client.transform.start_transform( transform_id="ecommerce-customer-transform", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e1f02928ef243bf07fd425754b7642b.asciidoc000066400000000000000000000004721506101734100263450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/add-nodes.asciidoc:109 [source, python] ---- resp = client.cluster.post_voting_config_exclusions( node_names="node_name", ) print(resp) resp1 = client.cluster.post_voting_config_exclusions( node_names="node_name", timeout="1m", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/4e2317aa45e87922d07c8ddc67a82d32.asciidoc000066400000000000000000000014221506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:100 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "path_hierarchy", "delimiter": "-", "replacement": "/", "skip": 2 } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="one-two-three-four-five", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/4e3414fc712b16311f9e433dd366f49d.asciidoc000066400000000000000000000003441506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/delete-inference.asciidoc:70 [source, python] ---- resp = client.inference.delete( task_type="sparse_embedding", inference_id="my-elser-model", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e4608ae4ce93c27bd174a9ea078cab2.asciidoc000066400000000000000000000017261506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/hybrid-search.asciidoc:10 [source, python] ---- resp = client.search( index="my-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "match": { "my_text_field": "the query string" } } } }, { "standard": { "query": { "sparse_vector": { "field": "my_tokens", "inference_id": "my-elser-endpoint", "query": "the query string" } } } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e50d9d25bfb07ac73e3a2be5d2fbbf7.asciidoc000066400000000000000000000013271506101734100270520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:229 [source, python] ---- resp = client.search( size=10000, query={ "match": { "user.id": "elkbee" } }, pit={ "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", "keep_alive": "1m" }, sort=[ { "@timestamp": { "order": "asc", "format": "strict_date_optional_time_nanos" } }, { "_shard_doc": "desc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e5f7a97efdbf517f7a2ed6ef7ff469c.asciidoc000066400000000000000000000005351506101734100271240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:409 
[source, python] ---- resp = client.render_search_template( source="{ \"query\": { \"terms\": { \"tags\": {{#toJson}}tags{{/toJson}} }}}", params={ "tags": [ "prod", "es01" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e6b78ac991ed2d5f9a2e7c89f4fc471.asciidoc000066400000000000000000000005471506101734100267000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:121 [source, python] ---- resp = client.search( index="music", pretty=True, suggest={ "song-suggest": { "prefix": "nir", "completion": { "field": "suggest" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e926063a9494b563387617b08c4f232.asciidoc000066400000000000000000000003751506101734100260570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:284 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="*", verbose=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4e931cfac74e46e221cf4a9ab88a182d.asciidoc000066400000000000000000000003071506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/field-caps.asciidoc:251 [source, python] ---- resp = client.field_caps( fields="rating,title", include_unmapped=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ed946065faa92f9950f04e402676a97.asciidoc000066400000000000000000000002361506101734100263030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/info.asciidoc:206 [source, python] ---- resp = client.xpack.info( human=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4edfb5934d14ad7655bd7e19a112b5c0.asciidoc000066400000000000000000000026371506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:522 [source, python] ---- resp = client.search( index="cooking_blog", query={ "bool": { "must": [ { "term": { "tags": "vegetarian" } }, { "range": { "rating": { "gte": 4.5 } } } ], "should": [ { "term": { "category": "Main Course" } }, { "multi_match": { "query": "curry spicy", "fields": [ "title^2", "description" ] } }, { "range": { "date": { "gte": "now-1M/d" } } } ], "must_not": [ { "term": { "category.keyword": "Dessert" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ee31fd4ea6d18f32ec28b7fa433441d.asciidoc000066400000000000000000000007511506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/put-app-privileges.asciidoc:94 [source, python] ---- resp = client.security.put_privileges( privileges={ "myapp": { "read": { "actions": [ "data:read/*", "action:login" ], "metadata": { "description": "Read access to myapp" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4eeded40f30949e359714a5bb6c88612.asciidoc000066400000000000000000000005621506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:31 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "elser-embeddings", "pipeline": "elser_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f08d9e21d9f199acc77abfb83287878.asciidoc000066400000000000000000000007531506101734100265430ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // search-application/apis/search-application-search.asciidoc:130 [source, python] ---- resp = client.search_application.search( name="my-app", params={ "query_string": "my first query", "text_fields": [ { "name": "title", "boost": 5 }, { "name": "description", "boost": 1 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f140d8922efdf3420e41b1cb669a289.asciidoc000066400000000000000000000003161506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/delete-component-template.asciidoc:31 [source, python] ---- resp = client.cluster.delete_component_template( name="template_1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f1e1205154d280db21fbd2754ed5398.asciidoc000066400000000000000000000010421506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/aggregate-metric-double.asciidoc:114 [source, python] ---- resp = client.indices.create( index="stats-index", mappings={ "properties": { "agg_metric": { "type": "aggregate_metric_double", "metrics": [ "min", "max", "sum", "value_count" ], "default_metric": "max" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f3366fc26e7ea4de446dfa5cdec9683.asciidoc000066400000000000000000000006771506101734100267530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:380 [source, python] ---- resp = client.search( query={ "function_score": { "gauss": { "@timestamp": { "origin": "2013-09-17", "scale": "10d", "offset": "5d", "decay": 0.5 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f621ab694f62ddb89e0684a9e76c4d1.asciidoc000066400000000000000000000006331506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:586 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "fields": { "comment": { "fragment_size": 150, "number_of_fragments": 3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f666d710758578e2582850dac3ad144.asciidoc000066400000000000000000000004021506101734100262060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/update-user-profile-data.asciidoc:141 [source, python] ---- resp = client.security.get_user_profile( uid="u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", data="*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f6694ef147a73b1163bde3c13779d26.asciidoc000066400000000000000000000003541506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/rejected-requests.asciidoc:68 [source, python] ---- resp = client.nodes.stats( human=True, filter_path="nodes.*.indexing_pressure", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f67b5f5c040f611bd2560a5d38ea6f5.asciidoc000066400000000000000000000005031506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/rare-terms-aggregation.asciidoc:331 [source, python] ---- resp = client.search( aggs={ "genres": { "rare_terms": { "field": "genre", "missing": "N/A" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4f8a4ad49e2bca6784c88ede18a1a709.asciidoc000066400000000000000000000002321506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/delete-license.asciidoc:43 
[source, python] ---- resp = client.license.delete() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4fa9ee04188cbf0b38cfc28f6a56527d.asciidoc000066400000000000000000000003341506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-datafeed.asciidoc:80 [source, python] ---- resp = client.ml.get_datafeeds( datafeed_id="datafeed-high_sum_total_sales", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4fb0629146ca78b85e823edd405497bb.asciidoc000066400000000000000000000007661506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/put-dfanalytics.asciidoc:914 [source, python] ---- resp = client.ml.put_data_frame_analytics( id="loan_classification", source={ "index": "loan-applicants" }, dest={ "index": "loan-applicants-classified" }, analysis={ "classification": { "dependent_variable": "label", "training_percent": 75, "num_top_classes": 2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4fcca1687d7b2cf08de526539fea5a76.asciidoc000066400000000000000000000025041506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/text-expansion-query.asciidoc:119 [source, python] ---- resp = client.search( index="my-index", query={ "bool": { "should": [ { "text_expansion": { "ml.inference.title_expanded.predicted_value": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?", "boost": 1 } } }, { "text_expansion": { "ml.inference.description_expanded.predicted_value": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?", "boost": 1 } } }, { "multi_match": { "query": "How is the weather in Jamaica?", "fields": [ "title", "description" ], "boost": 4 } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/4ff2dcec03fe097075cf1d174a019a1f.asciidoc000066400000000000000000000010111506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:721 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match_phrase": { "message": "number 1" } }, highlight={ "fields": { "message": { "type": "plain", "fragment_size": 15, "number_of_fragments": 3, "fragmenter": "simple" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50096ee0ca53fe8a88450ebb2a50f285.asciidoc000066400000000000000000000004041506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:143 [source, python] ---- resp = client.sql.query( format="csv", delimiter=";", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5024c524a7db0d6bb44c1820007cc5f4.asciidoc000066400000000000000000000012451506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/grok.asciidoc:39 [source, python] ---- resp = client.ingest.simulate( pipeline={ "description": "...", "processors": [ { "grok": { "field": "message", "patterns": [ "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes:int} %{NUMBER:duration:double}" ] } } ] }, docs=[ { "_source": { "message": "55.3.244.1 GET /index.html 15824 0.043" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50522d3d5b3d055f712ad737e3d1707a.asciidoc000066400000000000000000000016261506101734100263260ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // mapping/types/token-count.asciidoc:14 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "name": { "type": "text", "fields": { "length": { "type": "token_count", "analyzer": "standard" } } } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "name": "John Smith" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "name": "Rachel Alice Williams" }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "term": { "name.length": 3 } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/505a6c21a4cb608d3662fab1a35eb6df.asciidoc000066400000000000000000000021021506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/doc-count-field.asciidoc:54 [source, python] ---- resp = client.index( index="my_index", id="1", document={ "my_text": "histogram_1", "my_histogram": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] }, "_doc_count": 45 }, ) print(resp) resp1 = client.index( index="my_index", id="2", document={ "my_text": "histogram_2", "my_histogram": { "values": [ 0.1, 0.25, 0.35, 0.4, 0.45, 0.5 ], "counts": [ 8, 17, 8, 7, 6, 2 ] }, "_doc_count": 62 }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/50764f4ea88079156b0aff2835bcdc45.asciidoc000066400000000000000000000003751506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:221 [source, python] ---- resp = client.cluster.state( metric="metadata", pretty=True, filter_path="metadata.stored_scripts", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5093bfd281dbe41bd0dba8ff979e6e47.asciidoc000066400000000000000000000002761506101734100267400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/apis/get-stored-script-api.asciidoc:30 [source, python] ---- resp = client.get_script( id="my-stored-script", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50a9623c153cabe64101efb633e10e6c.asciidoc000066400000000000000000000003271506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/delete-autoscaling-policy.asciidoc:37 [source, python] ---- resp = client.autoscaling.delete_autoscaling_policy( name="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50b5c0332949d2154c72b629b5fa6222.asciidoc000066400000000000000000000005741506101734100261720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:345 [source, python] ---- resp = client.index( index="my-index-000001", refresh="wait_for", document={ "user_id": 12345 }, ) print(resp) resp1 = client.index( index="my-index-000001", refresh="wait_for", document={ "user_id": 12346 }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/50c2b06ecddb5a4aebd8b78e38af5f1f.asciidoc000066400000000000000000000026171506101734100271400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-a-data-stream.asciidoc:55 [source, python] ---- resp = client.ilm.put_lifecycle( name="my-lifecycle-policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_size": "50gb" } } }, "warm": { "min_age": "30d", "actions": { "shrink": { "number_of_shards": 1 }, "forcemerge": { "max_num_segments": 1 } } }, "cold": { "min_age": "60d", "actions": { 
"searchable_snapshot": { "snapshot_repository": "found-snapshots" } } }, "frozen": { "min_age": "90d", "actions": { "searchable_snapshot": { "snapshot_repository": "found-snapshots" } } }, "delete": { "min_age": "735d", "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50c2cea2adbe9523458c2686ab11df54.asciidoc000066400000000000000000000013401506101734100265420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc:206 [source, python] ---- resp = client.indices.create( index="text_payloads", mappings={ "properties": { "text": { "type": "text", "term_vector": "with_positions_payloads", "analyzer": "payload_delimiter" } } }, settings={ "analysis": { "analyzer": { "payload_delimiter": { "tokenizer": "whitespace", "filter": [ "delimited_payload" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50d5c5b7e8ed9a95b8d9a25a32a77425.asciidoc000066400000000000000000000004321506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/unique-tokenfilter.asciidoc:26 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "unique" ], text="the quick fox jumps the lazy fox", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50d9c0508ddb0fc5ba5a893eec219dd8.asciidoc000066400000000000000000000003461506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/synthetic-source.asciidoc:129 [source, python] ---- resp = client.index( index="idx", id="1", document={ "foo.bar.baz": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50dc35d3d8705bd62aed20a15209476c.asciidoc000066400000000000000000000010351506101734100264000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:364 [source, python] ---- resp = client.security.put_role_mapping( name="mapping9", rules={ "field": { "realm.name": "cloud-saml" } }, role_templates=[ { "template": { "source": "saml_user" } }, { "template": { "source": "_user_{{username}}" } } ], enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/50f922e9f002d8ac570953be59414b7b.asciidoc000066400000000000000000000005671506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/combined-fields-query.asciidoc:156 [source, python] ---- resp = client.search( query={ "combined_fields": { "query": "database systems", "fields": [ "title", "abstract" ], "operator": "and" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/511e5bb8ab881171b7e8629095e30b85.asciidoc000066400000000000000000000002611506101734100262630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-dsl.asciidoc:400 [source, python] ---- resp = client.search( index="datastream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/515e1104d136082e826d1b32af011759.asciidoc000066400000000000000000000007501506101734100260750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/nested-aggregation.asciidoc:38 [source, python] ---- resp = client.index( index="products", id="0", refresh=True, document={ "name": "LED TV", "resellers": [ { "reseller": "companyA", "price": 350 }, { "reseller": "companyB", "price": 500 } ] }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/5174c3c731fc1703e5b43ae2bae7a80e.asciidoc000066400000000000000000000004471506101734100265440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/apis/clear-sql-cursor-api.asciidoc:29 [source, python] ---- resp = client.sql.clear_cursor( cursor="sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/518fcf1dc1edd7dba0864accf71b49f4.asciidoc000066400000000000000000000004551506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-shard-routing.asciidoc:48 [source, python] ---- resp = client.search( index="my-index-000001", preference="_local", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5195a88194f7a139c635a84398d76205.asciidoc000066400000000000000000000003531506101734100260670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/restore-snapshot-api.asciidoc:60 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="my_snapshot", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/519e46350316a33162740e5d7968aa2c.asciidoc000066400000000000000000000007511506101734100261170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:1103 [source, python] ---- resp = client.search( index="image-index", knn={ "field": "image-vector", "query_vector": [ -5, 9, -12 ], "k": 10, "num_candidates": 100, "rescore_vector": { "oversample": 2 } }, fields=[ "title", "file-type" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/51b40610ae05730b4c6afd25647d7ae0.asciidoc000066400000000000000000000013331506101734100263710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:489 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "date": "2015-10-01T05:30:00Z" }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "date": "2015-10-01T06:30:00Z" }, ) print(resp1) resp2 = client.search( index="my-index-000001", size="0", aggs={ "by_day": { "date_histogram": { "field": "date", "calendar_interval": "day", "offset": "+6h" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/51b44224feee6e2e5974824334474c77.asciidoc000066400000000000000000000006101506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-s3.asciidoc:371 [source, python] ---- resp = client.snapshot.create_repository( name="my_s3_repository", repository={ "type": "s3", "settings": { "client": "my-client", "bucket": "my-bucket", "endpoint": "my.s3.endpoint" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/51f1a0930362594b231a5bcc17673768.asciidoc000066400000000000000000000010131506101734100261030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/modify-data-streams-api.asciidoc:17 [source, python] ---- resp = client.indices.modify_data_stream( actions=[ { "remove_backing_index": { "data_stream": "my-logs", "index": ".ds-my-logs-2099.01.01-000001" } }, { "add_backing_index": { "data_stream": "my-logs", "index": "index-to-add" } } ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/51f6cb682424e110f289af79c106f4c7.asciidoc000066400000000000000000000004201506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/troubleshooting-shards-capacity.asciidoc:401 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.max_shards_per_node.frozen": 3200 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5275842787967b6db876025f4a1c6942.asciidoc000066400000000000000000000006411506101734100260720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters.asciidoc:128 [source, python] ---- resp = client.search( suggest={ "text": "tring out Elasticsearch", "my-suggest-1": { "term": { "field": "message" } }, "my-suggest-2": { "term": { "field": "user" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5276a831513623e43ed567eb52b6dba9.asciidoc000066400000000000000000000006251506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-shard-routing.asciidoc:109 [source, python] ---- resp = client.index( index="my-index-000001", routing="my-routing-value", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/528e5f1c345c3769248cc6889e8cf552.asciidoc000066400000000000000000000004561506101734100263230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/similarity.asciidoc:47 [source, python] ---- resp = client.indices.put_mapping( index="index", properties={ "title": { "type": "text", "similarity": "my_similarity" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/529671ffaf7cc75fe83a81d729788be4.asciidoc000066400000000000000000000012421506101734100265370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-known-issues.asciidoc:124 [source, python] ---- resp = client.update( index=".elastic-connectors", id="connector_id", doc={ "configuration": { "field_a": { "type": "str", "value": "" }, "field_b": { "type": "bool", "value": False }, "field_c": { "type": "int", "value": 1 }, "field_d": { "type": "list", "value": "a,b" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/529b975b7cedaac58dce9821956adc37.asciidoc000066400000000000000000000043271506101734100266700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:390 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "MultiPolygon", "coordinates": [ [ [ [ 102, 2 ], [ 103, 2 ], [ 103, 3 ], [ 102, 3 ], [ 102, 2 ] ] ], [ [ [ 100, 0 ], [ 101, 0 ], [ 101, 1 ], [ 100, 1 ], [ 100, 0 ] ], [ [ 100.2, 0.2 ], [ 100.8, 0.2 ], [ 100.8, 0.8 ], [ 100.2, 0.8 ], [ 100.2, 0.2 ] ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52a2d119addb15366a935115518335fd.asciidoc000066400000000000000000000005441506101734100262460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shrink-index.asciidoc:52 [source, python] ---- resp = client.indices.put_settings( index="my_source_index", settings={ "settings": { "index.number_of_replicas": 0, "index.routing.allocation.require._name": "shrink_node_name" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/52b71aa4ae6563abae78cd20ff06d1e9.asciidoc000066400000000000000000000003631506101734100267070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:148 [source, python] ---- resp = client.nodes.stats( human=True, filter_path="nodes.*.name,nodes.*.indices.indexing", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52bc577a0d0cd42b46f33e0ef5124df8.asciidoc000066400000000000000000000007261506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:644 [source, python] ---- resp = client.put_script( id="my-search-template", script={ "lang": "mustache", "source": { "query": { "match": { "message": "{{query_string}}" } }, "from": "{{from}}", "size": "{{size}}" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52be795b68e6ef3f396f35fea52d0481.asciidoc000066400000000000000000000004521506101734100265250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/detect-threats-with-eql.asciidoc:51 [source, python] ---- resp = client.indices.put_index_template( name="my-data-stream-template", index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52c2b4c180388f5ae044588ba70b70f0.asciidoc000066400000000000000000000012101506101734100263200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/knn-query.asciidoc:178 [source, python] ---- resp = client.search( index="my-image-index", size=10, query={ "bool": { "must": { "knn": { "field": "image-vector", "query_vector": [ -5, 9, -12 ], "k": 3 } }, "filter": { "term": { "file-type": "png" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52c7e4172a446c394210a07c464c57d2.asciidoc000066400000000000000000000003561506101734100261710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:606 [source, python] ---- resp = client.delete_by_query_rethrottle( task_id="r1A2WoRbTwKZ516z6NEs5A:36619", requests_per_second="-1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52cdb5526ce69d0223d1dd198308bfea.asciidoc000066400000000000000000000011131506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/dynamic.asciidoc:53 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic": False, "properties": { "user": { "properties": { "name": { "type": "text" }, "social_networks": { "dynamic": True, "properties": {} } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52f1c1689ab35353858cdeaab7597546.asciidoc000066400000000000000000000007301506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/common-log-format-example.asciidoc:174 [source, python] ---- resp = client.index( index="my-data-stream", pipeline="my-pipeline", document={ "message": "89.160.20.128 - - [05/May/2099:16:21:15 +0000] \"GET /favicon.ico HTTP/1.1\" 200 3638 \"-\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36\"" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52f4c5eb08d39f98e2e2f5527ece9731.asciidoc000066400000000000000000000011041506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
inference/service-alibabacloud-ai-search.asciidoc:210 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="alibabacloud_ai_search_sparse", inference_config={ "service": "alibabacloud-ai-search", "service_settings": { "api_key": "", "service_id": "ops-text-sparse-embedding-001", "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", "workspace": "default" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/52fd112e970882c4d7cc4b0cca8e2c6f.asciidoc000066400000000000000000000010021506101734100266260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/numeric.asciidoc:23 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "number_of_bytes": { "type": "integer" }, "time_in_seconds": { "type": "float" }, "price": { "type": "scaled_float", "scaling_factor": 100 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5302f4f2bcc0f400ff71c791e6f68d7b.asciidoc000066400000000000000000000006271506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keyword-marker-tokenfilter.asciidoc:95 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "keyword_marker", "keywords": [ "jumping" ] }, "stemmer" ], text="fox running and jumping", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5305bc07c1bf90bab3e8db1de3e31b26.asciidoc000066400000000000000000000004631506101734100266720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shutdown/apis/shutdown-put.asciidoc:102 [source, python] ---- resp = client.shutdown.put_node( node_id="USpTGYaBSIKbgSUJR2Z9lg", type="restart", reason="Demonstrating how the node shutdown API works", allocation_delay="20m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/532ddf9afdcd0b1c9c0bb331e74d8df3.asciidoc000066400000000000000000000005031506101734100270450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:158 [source, python] ---- resp = client.indices.create( index="index_long", mappings={ "properties": { "field": { "type": "long" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/532f371934b61fb4992d37bedcc085de.asciidoc000066400000000000000000000004621506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shutdown/apis/shutdown-get.asciidoc:55 [source, python] ---- resp = client.shutdown.put_node( node_id="USpTGYaBSIKbgSUJR2Z9lg", type="restart", reason="Demonstrating how the node shutdown API works", allocation_delay="10m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5330191ec9f11281ebf6867bf11c58ae.asciidoc000066400000000000000000000004641506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:394 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", routing="1", query={ "range": { "age": { "gte": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5332c4cca5fbb45cc700dcd34f37bc38.asciidoc000066400000000000000000000004161506101734100267010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:557 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "english": "Some English text", "count": 5 }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/537bce129338d9227bccb6a0283dab45.asciidoc000066400000000000000000000004101506101734100264620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex.asciidoc:232 [source, python] ---- resp = client.cluster.put_settings( persistent={ "migrate.data_stream_reindex_max_request_per_second": 10000 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/53aa8b21e2b1c4d48960343711296704.asciidoc000066400000000000000000000004121506101734100261010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/regexp-syntax.asciidoc:60 [source, python] ---- resp = client.search( index="my-index-000001", query={ "regexp": { "my_field.keyword": "a\\\\.*" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/53b908c3432118c5a6e460f74d32006b.asciidoc000066400000000000000000000005161506101734100261640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:11 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "this is a test", "fields": [ "subject", "message" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/53bb7f0e3429861aadb8dd3d588085cd.asciidoc000066400000000000000000000004461506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:272 [source, python] ---- resp = client.search( index="my-data-stream", seq_no_primary_term=True, query={ "match": { "user.id": "yWIumJd7" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/53c6256295111524d5ff2885bdcb99a9.asciidoc000066400000000000000000000003501506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/get-transform-stats.asciidoc:328 [source, python] ---- resp = client.transform.get_transform( transform_id="_stats", from_="5", size="10", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/53d9d2ec9cb8d211772d764e76fe6890.asciidoc000066400000000000000000000005671506101734100264600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/inference.asciidoc:784 [source, python] ---- resp = client.ingest.simulate( id="query_helper_pipeline", docs=[ { "_source": { "content": "artificial intelligence in medicine articles published in the last 12 months" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/53e4ac5a4009fd21024f4b31e54aa83f.asciidoc000066400000000000000000000004261506101734100264540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/oidc-guide.asciidoc:619 [source, python] ---- resp = client.security.put_user( username="facilitator", password="", roles=[ "facilitator-role" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/54059961f05904368ced52c894a50e23.asciidoc000066400000000000000000000014301506101734100261270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:214 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_moving_max": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.max(values)" } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/540aefc39303c925a4efff71ebe2f002.asciidoc000066400000000000000000000005171506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:560 [source, python] ---- resp = client.search( aggs={ "tags": { "significant_terms": { "field": "tag", "min_doc_count": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5433bb83628cc91d81fbe53c533b2a09.asciidoc000066400000000000000000000007651506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/asciifolding-tokenfilter.asciidoc:83 [source, python] ---- resp = client.indices.create( index="asciifold_example", settings={ "analysis": { "analyzer": { "standard_asciifolding": { "tokenizer": "standard", "filter": [ "asciifolding" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5457c94f0039c6b95c7f9f305d0c6b58.asciidoc000066400000000000000000000005121506101734100263570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-stats.asciidoc:2538 [source, python] ---- resp = client.nodes.stats( metric="indices", ) print(resp) resp1 = client.nodes.stats( metric="os,process", ) print(resp1) resp2 = client.nodes.stats( node_id="10.0.0.1", metric="process", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/548a9b6f447bb820380c1c23e57c18c3.asciidoc000066400000000000000000000010001506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:15 [source, python] ---- resp = client.ingest.put_pipeline( id="cohere_embeddings_pipeline", processors=[ { "inference": { "model_id": "cohere_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/548b85bd9e6e7d33e36133953869449b.asciidoc000066400000000000000000000003701506101734100262360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:338 [source, python] ---- resp = client.cluster.put_settings( persistent={ "xpack.monitoring.collection.enabled": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/54a215d242ab65123b09e9dfb71bcbbf.asciidoc000066400000000000000000000005471506101734100266220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:237 [source, python] ---- resp = client.search( aggs={ "genres": { "terms": { "field": "genre", "order": { "_key": "asc" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/54a47b5d07e7bfbea75c77f35eaae18d.asciidoc000066400000000000000000000004621506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-known-issues.asciidoc:77 [source, python] ---- resp = client.indices.put_mapping( index=".elastic-connectors-sync-jobs-v1", properties={ "job_type": { "type": "keyword" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/54c12d5099d7b715c15f5bbf65b386a1.asciidoc000066400000000000000000000007741506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:310 [source, python] ---- resp = client.indices.create( index="alibabacloud-ai-search-embeddings", mappings={ "properties": { "content_embedding": { 
"type": "dense_vector", "dims": 1024, "element_type": "float" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/55085e6a2891040b6ac696561d0787c8.asciidoc000066400000000000000000000013711506101734100261350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/passthrough.asciidoc:93 [source, python] ---- resp = client.indices.create( index="my-index-000002", mappings={ "properties": { "attributes": { "type": "passthrough", "priority": 10, "properties": { "id": { "type": "keyword" } } }, "resource.attributes": { "type": "passthrough", "priority": 20, "properties": { "id": { "type": "keyword" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/55096381f811388fafd8e244dd2402c8.asciidoc000066400000000000000000000003541506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:451 [source, python] ---- resp = client.indices.rollover( alias="my-alias", settings={ "index.number_of_shards": 2 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/551467688d8c701315d0a371850a4056.asciidoc000066400000000000000000000006001506101734100257510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:54 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "hugging-face-embeddings", "pipeline": "hugging_face_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/551799fef2f86e393db83a967e4a30d1.asciidoc000066400000000000000000000016601506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/aggregate-metric-double.asciidoc:264 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "agg_metric": { "type": "aggregate_metric_double", "metrics": [ "min", "max", "sum", "value_count" ], "default_metric": "max" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "agg_metric": { "min": -302.5, "max": 702.3, "sum": 200, "value_count": 25 } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/553904c175a76d5ba83bc5d46fff7373.asciidoc000066400000000000000000000004131506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:1031 [source, python] ---- resp = client.security.saml_logout( token="46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3", refresh_token="mJdXLtmvTUSpoLwMvdBt_w", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/553d79817bb1333970e99507c37a159a.asciidoc000066400000000000000000000005411506101734100261360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/similarity.asciidoc:522 [source, python] ---- resp = client.indices.create( index="index", settings={ "index": { "similarity": { "default": { "type": "boolean" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5553cf7a02c22f616cd994747f2dd5a5.asciidoc000066400000000000000000000010151506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/nested.asciidoc:60 [source, python] ---- resp = client.search( index="my-index-000001", query={ "bool": { "must": [ { "match": { "user.first": "Alice" } }, { "match": { "user.last": "Smith" } } ] 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5566cff431570f522e1fc5475b2ed875.asciidoc000066400000000000000000000035271506101734100263620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/phrase-suggest.asciidoc:22 [source, python] ---- resp = client.indices.create( index="test", settings={ "index": { "number_of_shards": 1, "analysis": { "analyzer": { "trigram": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "shingle" ] }, "reverse": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "reverse" ] } }, "filter": { "shingle": { "type": "shingle", "min_shingle_size": 2, "max_shingle_size": 3 } } } } }, mappings={ "properties": { "title": { "type": "text", "fields": { "trigram": { "type": "text", "analyzer": "trigram" }, "reverse": { "type": "text", "analyzer": "reverse" } } } } }, ) print(resp) resp1 = client.index( index="test", refresh=True, document={ "title": "noble warriors" }, ) print(resp1) resp2 = client.index( index="test", refresh=True, document={ "title": "nobel prize" }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/55838e0b21c4f4da2dc8aaec045a6d5f.asciidoc000066400000000000000000000012031506101734100267010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-aggregation.asciidoc:185 [source, python] ---- resp = client.search( index="latency", size=0, runtime_mappings={ "load_time.seconds": { "type": "long", "script": { "source": "emit(doc['load_time'].value / params.timeUnit)", "params": { "timeUnit": 1000 } } } }, aggs={ "load_time_outlier": { "percentiles": { "field": "load_time.seconds" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/558b3f9b987771e9f9f35e51a0d7e062.asciidoc000066400000000000000000000013761506101734100264030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/profile.asciidoc:1160 [source, python] ---- resp = client.indices.create( index="my-dfs-index", settings={ "number_of_shards": 2, "number_of_replicas": 1 }, mappings={ "properties": { "my-keyword": { "type": "keyword" } } }, ) print(resp) resp1 = client.bulk( index="my-dfs-index", refresh=True, operations=[ { "index": { "_id": "1" } }, { "my-keyword": "a" }, { "index": { "_id": "2" } }, { "my-keyword": "b" } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/5597eeb8f43b5d47bd07f27122c24194.asciidoc000066400000000000000000000007361506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:1073 [source, python] ---- resp = client.async_search.submit( index="my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", ccs_minimize_roundtrips=False, query={ "match": { "user.id": "kimchy" } }, source=[ "user.id", "message", "http.response.status_code" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/55d349ccb0efd5e1c06c6dd383a593cf.asciidoc000066400000000000000000000006661506101734100267310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:1030 [source, python] ---- resp = client.async_search.submit( index="my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001", query={ "match": { "user.id": "kimchy" } }, source=[ "user.id", "message", "http.response.status_code" ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/55e8ddf643726dec51531ada0bec7143.asciidoc000066400000000000000000000002231506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-stats.asciidoc:32 [source, python] ---- resp = client.slm.get_stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/55f0fec6342f677af74de2124b801aa2.asciidoc000066400000000000000000000006111506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:229 [source, python] ---- resp = client.search( index="byte-image-index", knn={ "field": "byte-image-vector", "query_vector": [ -5, 9 ], "k": 10, "num_candidates": 100 }, fields=[ "title" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/55f4a15b84b724b9fbf2efd29a4da120.asciidoc000066400000000000000000000002471506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/authenticate.asciidoc:41 [source, python] ---- resp = client.security.authenticate() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5619103306878d58a058bce87c5bd82b.asciidoc000066400000000000000000000002711506101734100262710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/recovery.asciidoc:342 [source, python] ---- resp = client.indices.recovery( human=True, detailed=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5632c3b947062d3a5fc0e4f3413b3308.asciidoc000066400000000000000000000005211506101734100262400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/register-fs-repo.asciidoc:17 [source, python] ---- resp = client.snapshot.create_repository( name="my_fs_backup", repository={ "type": "fs", "settings": { "location": "/mount/backups/my_fs_backup_location" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/563dfbf421422c837ee6929ae2ede876.asciidoc000066400000000000000000000003051506101734100265170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/migrate-to-data-stream.asciidoc:59 [source, python] ---- resp = client.indices.migrate_to_data_stream( name="my-logs", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/565386eee0951865a684e41fab53b40c.asciidoc000066400000000000000000000010241506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elser.asciidoc:128 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="my-elser-model", inference_config={ "service": "elser", "service_settings": { "adaptive_allocations": { "enabled": True, "min_number_of_allocations": 3, "max_number_of_allocations": 10 }, "num_threads": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56563f91d9f0b74e9e4aae9cb221845b.asciidoc000066400000000000000000000017561506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-cross-cluster-api-key.asciidoc:111 [source, python] ---- resp = client.perform_request( "POST", "/_security/cross_cluster/api_key", headers={"Content-Type": "application/json"}, body={ "name": "my-cross-cluster-api-key", "expiration": "1d", "access": { "search": [ { "names": [ "logs*" ] } ], "replication": [ { "names": [ "archive*" ] } ] }, "metadata": { "description": "phase one", "environment": { "level": 1, "trusted": True, "tags": [ "dev", "staging" ] } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/565908b03edff1d6e6e7cdfb92177faf.asciidoc000066400000000000000000000010001506101734100267300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/stats-aggregation.asciidoc:53 [source, python] ---- resp = client.search( index="exams", size=0, runtime_mappings={ "grade.weighted": { "type": "double", "script": "\n emit(doc['grade'].value * doc['weight'].value)\n " } }, aggs={ "grades_stats": { "stats": { "field": "grade.weighted" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/568979150ce18739f8d3ea859355aaa3.asciidoc000066400000000000000000000003251506101734100263100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-users.asciidoc:92 [source, python] ---- resp = client.security.get_user( username="jacknich", with_profile_uid=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/569f10fee671632017c722fd983009d4.asciidoc000066400000000000000000000020451506101734100262100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:548 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "shop": { "terms": { "field": "shop" } } }, { "product": { "terms": { "field": "product" } } }, { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56a1aa4f7fa62f2289e20607e3039bf3.asciidoc000066400000000000000000000004131506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:19 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "email": { "type": "keyword" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56a903530990313b753b1be33578997a.asciidoc000066400000000000000000000016151506101734100260510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:448 [source, python] ---- resp = client.search( query={ "dis_max": { "queries": [ { "multi_match": { "query": "Will Smith", "type": "cross_fields", "fields": [ "first", "last" ], "minimum_should_match": "50%" } }, { "multi_match": { "query": "Will Smith", "type": "cross_fields", "fields": [ "*.edge" ] } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56b6b50b174a935d368301ebd717231d.asciidoc000066400000000000000000000002671506101734100262520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/stats.asciidoc:125 [source, python] ---- resp = client.watcher.stats( metric="current_watches", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56da252798b8e7b006738428aa1a7f4c.asciidoc000066400000000000000000000012031506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:373 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "my_range": { "type": "long_range" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "my_range": { "gt": 200, "lt": 300 } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/56da9c55774f4c2e8eadde0579bdc60c.asciidoc000066400000000000000000000010711506101734100267330ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:463 [source, python] ---- resp = client.search( index="test*", filter_path="aggregations", aggs={ "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "s": { "order": "asc", "numeric_type": "double" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56db76c987106a870357854d3068ad98.asciidoc000066400000000000000000000002621506101734100261510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-rules/apis/list-query-rulesets.asciidoc:164 [source, python] ---- resp = client.query_rules.list_rulesets() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56e90a63f94eeb882fe8acbcd74229c2.asciidoc000066400000000000000000000014301506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:256 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_moving_min": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.min(values)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56f3a6bec7be5a90fb43144c331a5b5a.asciidoc000066400000000000000000000003271506101734100266220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:260 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", flat_settings=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/56fa6c9e08258157d445e2f92274962b.asciidoc000066400000000000000000000006361506101734100262310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/shingle-tokenfilter.asciidoc:220 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "shingle", "min_shingle_size": 2, "max_shingle_size": 3, "output_unigrams": False } ], text="quick brown fox jumps", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/571314a948e49f1f9614d36fcf79392a.asciidoc000066400000000000000000000004031506101734100263040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:877 [source, python] ---- resp = client.async_search.get( id="FjktRGJ1Y2w1U0phLTRhZnVyeUZ2MVEbWEJyeVBPQldTV3FGZGdIeUVabXBldzo5NzA4", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/578808065fee8691355b8f25c35782cd.asciidoc000066400000000000000000000005321506101734100262340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/profile.asciidoc:1023 [source, python] ---- resp = client.search( index="my-index-000001", filter_path="profile.shards.fetch", profile=True, query={ "term": { "user.id": { "value": "elkbee" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5797df4b8e71d821a1488cbb63481104.asciidoc000066400000000000000000000003351506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/troubleshooting-shards-capacity.asciidoc:418 [source, python] ---- resp = client.health_report( feature="shards_capacity", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/57a3e8d2ca64e37e90d658c4cd935399.asciidoc000066400000000000000000000011701506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // query-dsl/distance-feature-query.asciidoc:127 [source, python] ---- resp = client.search( index="items", query={ "bool": { "must": { "match": { "name": "chocolate" } }, "should": { "distance_feature": { "field": "location", "pivot": "1000m", "origin": [ -71.3, 41.15 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/57c690f8fa95bacf4b250803be7467e4.asciidoc000066400000000000000000000003651506101734100265170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:427 [source, python] ---- resp = client.index( index="example", document={ "location": "BBOX (1000.0, 1002.0, 2000.0, 1000.0)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/57dc15e5ad663c342fd5c1d86fcd1b29.asciidoc000066400000000000000000000005121506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/oidc-prepare-authentication-api.asciidoc:106 [source, python] ---- resp = client.security.oidc_prepare_authentication( realm="oidc1", state="lGYK0EcSLjqH6pkT5EVZjC6eIW5YCGgywj2sxROO", nonce="zOBXLJGUooRrbLbQk5YCcyC8AXw3iloynvluYhZ5", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/57e0bbab98f17d5b564d1ea146a55fe4.asciidoc000066400000000000000000000014651506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template.asciidoc:227 [source, python] ---- resp = client.indices.put_index_template( name="template_1", index_patterns=[ "temp*" ], priority=0, template={ "settings": { "number_of_shards": 1, "number_of_replicas": 0 }, "mappings": { "_source": { "enabled": False } } }, ) print(resp) resp1 = client.indices.put_index_template( name="template_2", index_patterns=[ "template*" ], priority=1, template={ "settings": { "number_of_shards": 2 }, "mappings": { "_source": { "enabled": True } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/582c4b05401dbc190b19411282d85310.asciidoc000066400000000000000000000006171506101734100260720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:380 [source, python] ---- resp = client.update( index="my-index-000001", id="1", script={ "source": "if (ctx._source.tags.contains(params['tag'])) { ctx.op = 'delete' } else { ctx.op = 'none' }", "lang": "painless", "params": { "tag": "green" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/582da02c09e0597b4396c87e33571e7b.asciidoc000066400000000000000000000004511506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:311 [source, python] ---- resp = client.sql.query( format="json", cursor="sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5836b09198feb1269ed12839b416123d.asciidoc000066400000000000000000000004751506101734100262150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-jinaai.asciidoc:218 [source, python] ---- resp = client.search( index="jinaai-index", query={ "semantic": { "field": "content", "query": "who inspired taking care of the sea?" 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5837d5f50665ac0a26181d3aaeb3f204.asciidoc000066400000000000000000000004161506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/start-trained-model-deployment.asciidoc:214 [source, python] ---- resp = client.ml.start_trained_model_deployment( model_id="my_model", deployment_id="my_model_for_search", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/584f502cf840134f2db5f39e2483ced1.asciidoc000066400000000000000000000021671506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1454 [source, python] ---- resp = client.indices.create( index="portuguese_example", settings={ "analysis": { "filter": { "portuguese_stop": { "type": "stop", "stopwords": "_portuguese_" }, "portuguese_keywords": { "type": "keyword_marker", "keywords": [ "exemplo" ] }, "portuguese_stemmer": { "type": "stemmer", "language": "light_portuguese" } }, "analyzer": { "rebuilt_portuguese": { "tokenizer": "standard", "filter": [ "lowercase", "portuguese_stop", "portuguese_keywords", "portuguese_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/585a34ad79aee16678b37da785933ac8.asciidoc000066400000000000000000000002111506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/stop.asciidoc:85 [source, python] ---- resp = client.ilm.stop() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/585b19369cb9b9763a7e8d405f009a47.asciidoc000066400000000000000000000003461506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:249 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", runtime={ "day_of_week": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5865ca8d2bcd087ed5dbee33fafee57f.asciidoc000066400000000000000000000003611506101734100271550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc:111 [source, python] ---- resp = client.indices.explain_data_lifecycle( index=".ds-my-data-stream-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/586cfa0e5fd695b7d451e854f9fb4a9c.asciidoc000066400000000000000000000017411506101734100266730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:20 [source, python] ---- resp = client.indices.create( index="my_locations", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.index( index="my_locations", id="1", refresh=True, document={ "location": "POINT(4.912350 52.374081)", "city": "Amsterdam", "name": "NEMO Science Museum" }, ) print(resp1) resp2 = client.index( index="my_locations", id="2", refresh=True, document={ "location": "POINT(4.405200 51.222900)", "city": "Antwerp", "name": "Letterenhuis" }, ) print(resp2) resp3 = client.index( index="my_locations", id="3", refresh=True, document={ "location": "POINT(2.336389 48.861111)", "city": "Paris", "name": "Musée du Louvre" }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/58ca855be30049f8f0879e532db51ee2.asciidoc000066400000000000000000000024021506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/put-transform.asciidoc:320 [source, python] ---- resp = client.transform.put_transform( 
transform_id="ecommerce_transform1", source={ "index": "kibana_sample_data_ecommerce", "query": { "term": { "geoip.continent_name": { "value": "Asia" } } } }, pivot={ "group_by": { "customer_id": { "terms": { "field": "customer_id", "missing_bucket": True } } }, "aggregations": { "max_price": { "max": { "field": "taxful_total_price" } } } }, description="Maximum priced ecommerce data by customer_id in Asia", dest={ "index": "kibana_sample_data_ecommerce_transform1", "pipeline": "add_timestamp_pipeline" }, frequency="5m", sync={ "time": { "field": "order_date", "delay": "60s" } }, retention_policy={ "time": { "field": "order_date", "max_age": "30d" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/58dd26afc919722e21358c91e112b27a.asciidoc000066400000000000000000000005321506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:459 [source, python] ---- resp = client.search( index="cooking_blog", query={ "range": { "date": { "gte": "2023-05-01", "lte": "2023-05-31" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/58e684e0b771b4646662fe12d3060c05.asciidoc000066400000000000000000000007541506101734100262050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/cjk-width-tokenfilter.asciidoc:69 [source, python] ---- resp = client.indices.create( index="cjk_width_example", settings={ "analysis": { "analyzer": { "standard_cjk_width": { "tokenizer": "standard", "filter": [ "cjk_width" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/58f72be60c25752d7899a35fc60fe6eb.asciidoc000066400000000000000000000003631506101734100265260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/misc.asciidoc:182 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.indices.recovery": "DEBUG" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/591c7fb7451069829a14bba593136f1f.asciidoc000066400000000000000000000003251506101734100262650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/forecast.asciidoc:88 [source, python] ---- resp = client.ml.forecast( job_id="low_request_rate", duration="10d", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5969c446688c8b326acc80276573e9d2.asciidoc000066400000000000000000000015031506101734100262320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:324 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "number_of_fragments": 3, "fragment_size": 150, "fields": { "body": { "pre_tags": [ "" ], "post_tags": [ "" ] }, "blog.title": { "number_of_fragments": 0 }, "blog.author": { "number_of_fragments": 0 }, "blog.comment": { "number_of_fragments": 5, "order": "score" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/59726e3c90e1218487a781508788c243.asciidoc000066400000000000000000000006341506101734100260070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/autodatehistogram-aggregation.asciidoc:293 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sale_date": { "auto_date_histogram": { "field": "date", "buckets": 10, "missing": "2000/01/01" } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/597d456edfcb3d410954a3e9b5babf9a.asciidoc000066400000000000000000000007061506101734100267310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/disk-usage.asciidoc:51 [source, python] ---- resp = client.indices.create( index="index", mappings={ "dynamic_templates": [ { "strings": { "match_mapping_type": "string", "mapping": { "type": "keyword" } } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5987afb2c17c73fe3d860937565ef115.asciidoc000066400000000000000000000003211506101734100263570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/point-in-time-api.asciidoc:46 [source, python] ---- resp = client.open_point_in_time( index="my-index-000001", keep_alive="1m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/599454613ac699d447537e79e65ae35a.asciidoc000066400000000000000000000006541506101734100262430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:67 [source, python] ---- resp = client.search( index="my-index-000001", script_fields={ "my_doubled_field": { "script": { "source": "doc['my_field'].value * params['multiplier']", "params": { "multiplier": 2 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/599f693cc7d30b1153f5eeecec8eb23a.asciidoc000066400000000000000000000003211506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/delete-index-template-v1.asciidoc:35 [source, python] ---- resp = client.indices.delete_template( name="my-legacy-index-template", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/59aa5216630f80c5dc298fc5bba4a819.asciidoc000066400000000000000000000003261506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:415 [source, python] ---- resp = client.indices.get_settings( index=".reindexed-v9-ml-anomalies-custom-example", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/59b8b9555f4aa30bc4613f819e9fc8f0.asciidoc000066400000000000000000000002541506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/close.asciidoc:78 [source, python] ---- resp = client.indices.close( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/59d015f7bd0eeab40d0885010a62fa70.asciidoc000066400000000000000000000012001506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/role-templates.asciidoc:52 [source, python] ---- resp = client.security.put_role( name="example2", indices=[ { "names": [ "my-index-000001" ], "privileges": [ "read" ], "query": { "template": { "source": { "term": { "group.id": "{{_user.metadata.group_id}}" } } } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/59d736a4d064ed2013c7ead8e32e0998.asciidoc000066400000000000000000000006141506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-openai.asciidoc:177 [source, python] ---- resp = client.inference.put( task_type="completion", inference_id="openai-completion", inference_config={ "service": "openai", "service_settings": { "api_key": "", "model_id": "gpt-3.5-turbo" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/59f0ad2a6f97200e98e8eb079cdd8334.asciidoc000066400000000000000000000003171506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT 
// docs/multi-get.asciidoc:162 [source, python] ---- resp = client.mget( index="my-index-000001", ids=[ "1", "2" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5a006feed86309b547bbaa1baca1c496.asciidoc000066400000000000000000000034771506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:148 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "numeric_counts": { "match_mapping_type": [ "long", "double" ], "match": "count", "mapping": { "type": "{dynamic_type}", "index": False } } }, { "integers": { "match_mapping_type": "long", "mapping": { "type": "integer" } } }, { "strings": { "match_mapping_type": "string", "mapping": { "type": "text", "fields": { "raw": { "type": "keyword", "ignore_above": 256 } } } } }, { "non_objects_keyword": { "match_mapping_type": "*", "unmatch_mapping_type": "object", "mapping": { "type": "keyword" } } } ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "my_integer": 5, "my_string": "Some string", "my_boolean": "false", "field": { "count": 4 } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/5a3855f1b3e37d89ab7cbcc4f7ae1dd3.asciidoc000066400000000000000000000005411506101734100270010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc:43 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "limit", "max_token_count": 2 } ], text="quick fox jumps over lazy dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5a3fe9584d203d1fd6c96981ba34e0de.asciidoc000066400000000000000000000006251506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/geo-match-enrich-policy-type-ex.asciidoc:17 [source, python] ---- resp = client.indices.create( index="postal_codes", mappings={ "properties": { "location": { "type": "geo_shape" }, "postal_code": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5a6bb9ac6830668ecc00550c1aa8f2f1.asciidoc000066400000000000000000000007241506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:286 [source, python] ---- resp = client.security.put_role( name="logstash-reader", indices=[ { "names": [ "logstash-*" ], "privileges": [ "read_cross_cluster", "read", "view_index_metadata" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5a754dcc854b9154296550a0b581cb9d.asciidoc000066400000000000000000000005601506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/ipprefix-aggregation.asciidoc:50 [source, python] ---- resp = client.search( index="network-traffic", size=0, aggs={ "ipv4-subnets": { "ip_prefix": { "field": "ipv4", "prefix_length": 24 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5a7f05ab1d05b4eef5ff327168517165.asciidoc000066400000000000000000000005101506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-shard-routing.asciidoc:140 [source, python] ---- resp = client.search( index="my-index-000001", routing="my-routing-value,my-routing-value-2", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/5ab9b44939fb30f5b4adbdcc4bcc0733.asciidoc000066400000000000000000000010171506101734100267630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-ilm.asciidoc:53 [source, python] ---- resp = client.ilm.put_lifecycle( name="datastream_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_age": "5m" }, "downsample": { "fixed_interval": "1h" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5ad365ed9e1a3c26093a0f09666c133a.asciidoc000066400000000000000000000007351506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:252 [source, python] ---- resp = client.security.put_role_mapping( name="mapping5", role_templates=[ { "template": { "source": "{{#tojson}}groups{{/tojson}}" }, "format": "json" } ], rules={ "field": { "realm.name": "saml1" } }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5afbd9caed88c32f8a2968c07054f096.asciidoc000066400000000000000000000003041506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/logstash/delete-pipeline.asciidoc:73 [source, python] ---- resp = client.logstash.delete_pipeline( id="my_pipeline", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b0cc9e186a8f765a11141809b8b17b7.asciidoc000066400000000000000000000003501506101734100263360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search-application/apis/list-search-applications.asciidoc:106 [source, python] ---- resp = client.search_application.list( from_="0", size="3", q="app*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b191f2dbfa46c774cc9b9b9e8d1d831.asciidoc000066400000000000000000000002651506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-user-privileges.asciidoc:40 [source, python] ---- resp = client.security.get_user_privileges() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b1ae98ad03e2819fc7c3468840ef448.asciidoc000066400000000000000000000004401506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:637 [source, python] ---- resp = client.eql.search( index="my-index*", query="\n sample by host\n [any where uptime > 0]\n [any where port > 100]\n [any where bool == true]\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b266deba5396c7810af1b8315c23596.asciidoc000066400000000000000000000005321506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:62 [source, python] ---- resp = client.search( index="my_locations", size=0, aggs={ "grouped": { "geohash_grid": { "field": "location", "precision": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b281956e35a26e734c482b42b356c0d.asciidoc000066400000000000000000000002621506101734100262550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/alias-exists.asciidoc:16 [source, python] ---- resp = client.indices.exists_alias( name="my-alias", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b2a13366bd4e1ab4b25d04d360570dc.asciidoc000066400000000000000000000007171506101734100264500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-component-template.asciidoc:262 [source, python] ---- resp = 
client.cluster.put_component_template( name="template_1", template={ "settings": { "number_of_shards": 1 } }, meta={ "description": "set number of shards to one", "serialization": { "class": "MyComponentTemplate", "id": 10 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b3384992c398ea8a3064d2e08725e2b.asciidoc000066400000000000000000000027421506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:291 [source, python] ---- resp = client.indices.create( index="node", mappings={ "properties": { "ip": { "type": "ip" }, "date": { "type": "date" } } }, ) print(resp) resp1 = client.bulk( index="node", refresh=True, operations=[ { "index": {} }, { "ip": "192.168.0.1", "date": "2020-01-01T01:01:01", "m": 1 }, { "index": {} }, { "ip": "192.168.0.1", "date": "2020-01-01T02:01:01", "m": 2 }, { "index": {} }, { "ip": "192.168.0.2", "date": "2020-01-01T02:01:01", "m": 3 } ], ) print(resp1) resp2 = client.search( index="node", filter_path="aggregations", aggs={ "ip": { "terms": { "field": "ip" }, "aggs": { "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "date": "desc" } } } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/5b58007f10700ec7934580f034404652.asciidoc000066400000000000000000000005411506101734100257450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:579 [source, python] ---- resp = client.create( index="my-index-000001", id="1", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b6bc085943e9189236d98b3c05ed62c.asciidoc000066400000000000000000000011051506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:44 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_size": "25GB" } } }, "delete": { "min_age": "30d", "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5b7d6f1db88ca6f42c48fa3dbb4341e8.asciidoc000066400000000000000000000004431506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-mapping.asciidoc:85 [source, python] ---- resp = client.indices.get_mapping( index="*", ) print(resp) resp1 = client.indices.get_mapping( index="_all", ) print(resp1) resp2 = client.indices.get_mapping() print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/5b8119b4d9a09f4643be5a5b40875c8f.asciidoc000066400000000000000000000015421506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/boolean.asciidoc:78 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "is_published": True }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "is_published": False }, ) print(resp1) resp2 = client.search( index="my-index-000001", aggs={ "publish_state": { "terms": { "field": "is_published" } } }, sort=[ "is_published" ], fields=[ { "field": "weight" } ], runtime_mappings={ "weight": { "type": "long", "script": "emit(doc['is_published'].value ? 
10 : 0)" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/5bb0d84185df2f276f01bb2fba709e1a.asciidoc000066400000000000000000000004211506101734100266200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1482 [source, python] ---- resp = client.eql.search( index="cluster_one:my-data-stream,cluster_two:my-data-stream", query="\n process where process.name == \"regsvr32.exe\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5bba213a7f543190139d1a69ab2ed076.asciidoc000066400000000000000000000005351506101734100264020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-across-clusters.asciidoc:302 [source, python] ---- resp = client.esql.async_query( format="json", query="\n FROM cluster_one:my-index*,cluster_two:logs*\n | STATS COUNT(http.response.status_code) BY user.id\n | LIMIT 2\n ", include_ccs_metadata=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5bbccf103107e505c17ae59863753efd.asciidoc000066400000000000000000000003771506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-influencer.asciidoc:158 [source, python] ---- resp = client.ml.get_influencers( job_id="high_sum_total_sales", sort="influencer_score", desc=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c187ba92dd1678fda86b5eec8cc7421.asciidoc000066400000000000000000000007631506101734100266610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/script-query.asciidoc:24 [source, python] ---- resp = client.search( query={ "bool": { "filter": { "script": { "script": "\n double amount = doc['amount'].value;\n if (doc['type'].value == 'expense') {\n amount *= -1;\n }\n return amount < 10;\n " } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c22172a944864a7d138decdc08558b4.asciidoc000066400000000000000000000003531506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/detect-threats-with-eql.asciidoc:73 [source, python] ---- resp = client.cat.indices( index="my-data-stream", v=True, h="health,status,index,docs.count", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c249eaeb99e6aee07162128288ac1b1.asciidoc000066400000000000000000000015761506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/moving-percentiles-aggregation.asciidoc:43 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_percentile": { "percentiles": { "field": "price", "percents": [ 1, 99 ] } }, "the_movperc": { "moving_percentiles": { "buckets_path": "the_percentile", "window": 10 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c24a9a0ddbfa50628dacdb9d25f7ab0.asciidoc000066400000000000000000000005541506101734100270430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/extendedstats-aggregation.asciidoc:172 [source, python] ---- resp = client.search( index="exams", size=0, aggs={ "grades_stats": { "extended_stats": { "field": "grade", "missing": 0 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c2f486c27bd5346e512265f93375d16.asciidoc000066400000000000000000000005341506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/range-query.asciidoc:241 [source, python] ---- 
resp = client.search( query={ "range": { "timestamp": { "time_zone": "+01:00", "gte": "2020-01-01T00:00:00", "lte": "now" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c6fbeac20dc23b613847f35d431ecab.asciidoc000066400000000000000000000016011506101734100266750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:578 [source, python] ---- resp = client.search( query={ "function_score": { "functions": [ { "gauss": { "price": { "origin": "0", "scale": "20" } } }, { "gauss": { "location": { "origin": "11, 12", "scale": "2km" } } } ], "query": { "match": { "properties": "balcony" } }, "score_mode": "multiply" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c7ece1f30267adabdb832424871900a.asciidoc000066400000000000000000000003011506101734100264540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:24 [source, python] ---- resp = client.cat.allocation( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5c8ac24dd56e85d8f3f6705ec3c6dc32.asciidoc000066400000000000000000000011671506101734100266570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/circle.asciidoc:27 [source, python] ---- resp = client.indices.create( index="circles", mappings={ "properties": { "circle": { "type": "geo_shape" } } }, ) print(resp) resp1 = client.ingest.put_pipeline( id="polygonize_circles", description="translate circle to polygon", processors=[ { "circle": { "field": "circle", "error_distance": 28, "shape_type": "geo_shape" } } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/5ccfd9f4698dcd7cdfbc6bad60081aab.asciidoc000066400000000000000000000003141506101734100272170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/get-dfanalytics.asciidoc:218 [source, python] ---- resp = client.ml.get_data_frame_analytics( id="loganalytics", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5cd792dff7d5891c33bef098d9338ce1.asciidoc000066400000000000000000000015221506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/store.asciidoc:20 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "title": { "type": "text", "store": True }, "date": { "type": "date", "store": True }, "content": { "type": "text" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "title": "Some short title", "date": "2015-01-01", "content": "A very long content field..." 
}, ) print(resp1) resp2 = client.search( index="my-index-000001", stored_fields=[ "title", "date" ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/5ceb734e3affe00e2cdc29af748d95bf.asciidoc000066400000000000000000000010361506101734100270650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/inference-apis.asciidoc:114 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="small_chunk_size", inference_config={ "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1 }, "chunking_settings": { "strategy": "sentence", "max_chunk_size": 100, "sentence_overlap": 0 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5cf12cc4f98d98dc79bead7e6556679c.asciidoc000066400000000000000000000005431506101734100267050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/synthetic-source.asciidoc:10 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5cfab507e50d8c5182939412a9dbcdc8.asciidoc000066400000000000000000000035231506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geocentroid-aggregation.asciidoc:184 [source, python] ---- resp = client.indices.create( index="places", mappings={ "properties": { "geometry": { "type": "geo_shape" } } }, ) print(resp) resp1 = client.bulk( index="places", refresh=True, operations=[ { "index": { "_id": 1 } }, { "name": "NEMO Science Museum", "geometry": "POINT(4.912350 52.374081)" }, { "index": { "_id": 2 } }, { "name": "Sportpark De Weeren", "geometry": { "type": "Polygon", "coordinates": [ [ [ 4.965305328369141, 52.39347642069457 ], [ 4.966979026794433, 52.391721758934835 ], [ 4.969425201416015, 52.39238958618537 ], [ 4.967944622039794, 52.39420969150824 ], [ 4.965305328369141, 52.39347642069457 ] ] ] } } ], ) print(resp1) resp2 = client.search( index="places", size="0", aggs={ "centroid": { "geo_centroid": { "field": "geometry" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/5d03bb385904d20c5323885706738459.asciidoc000066400000000000000000000004651506101734100257740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/aliases.asciidoc:16 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "my-data-stream", "alias": "my-alias" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d3ee81bcf6ad57f39052c9065963cc3.asciidoc000066400000000000000000000013771506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/copy-to.asciidoc:139 [source, python] ---- resp = client.indices.create( index="test_index", mappings={ "dynamic": "strict", "properties": { "description": { "properties": { "notes": { "type": "text", "copy_to": [ "description.notes_raw" ], "analyzer": "standard", "search_analyzer": "standard" }, "notes_raw": { "type": "keyword" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d428ea66252fd252b6a8d6f47605c86.asciidoc000066400000000000000000000015211506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc:176 [source, python] ---- resp = client.indices.create( index="cjk_bigram_example", settings={ "analysis": { "analyzer": { 
"han_bigrams": { "tokenizer": "standard", "filter": [ "han_bigrams_filter" ] } }, "filter": { "han_bigrams_filter": { "type": "cjk_bigram", "ignored_scripts": [ "hangul", "hiragana", "katakana" ], "output_unigrams": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d5b06468c54308f52c212cca5d58fef.asciidoc000066400000000000000000000005141506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:469 [source, python] ---- resp = client.sql.query( format="json", cursor="sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWWWdrRlVfSS1TbDYtcW9lc1FJNmlYdw==:BAFmBmF1dGhvcgFmBG5hbWUBZgpwYWdlX2NvdW50AWYMcmVsZWFzZV9kYXRl+v///w8=", columnar=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d5cdbd4c5c62a90ff2a39cba4a59368.asciidoc000066400000000000000000000011631506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:610 [source, python] ---- resp = client.search_application.search( name="my_search_application", params={ "elser": True, "text": True, "query_string": "where is the best mountain climbing?", "elser_fields": [ { "name": "title", "boost": 1 }, { "name": "description", "boost": 1 } ], "text_query_boost": 4, "min_score": 10 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d689d74062cddd01a0711a2fa7f23fd.asciidoc000066400000000000000000000004061506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/network/tracers.asciidoc:92 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.transport.TransportService.tracer": "TRACE" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d7980d8c745abf7ea0fa573e818bd5b.asciidoc000066400000000000000000000013531506101734100266570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/shingle-tokenfilter.asciidoc:488 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "en": { "tokenizer": "standard", "filter": [ "my_shingle_filter" ] } }, "filter": { "my_shingle_filter": { "type": "shingle", "min_shingle_size": 2, "max_shingle_size": 5, "output_unigrams": False } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5d9d7b84e2fec7ecd832145cbb951cf1.asciidoc000066400000000000000000000013371506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:600 [source, python] ---- resp = client.search( size=0, aggs={ "expired_sessions": { "terms": { "field": "account_id", "include": { "partition": 0, "num_partitions": 20 }, "size": 10000, "order": { "last_access": "asc" } }, "aggs": { "last_access": { "max": { "field": "access_date" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5da6efd5b038ada64c9e853c88c1ec47.asciidoc000066400000000000000000000006151506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:114 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "brown fox", "type": "best_fields", "fields": [ "subject", "message" ], "tie_breaker": 0.3 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5daf8ede198be9b118da5bee9896cb00.asciidoc000066400000000000000000000015451506101734100270220ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // mapping/types/flattened.asciidoc:333 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "flattened": { "type": "flattened" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "flattened": { "field": [ "apple", "apple", "banana", "avocado", "10", "200", "AVOCADO", "Banana", "Tangerine" ] } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/5dbf06ca9058843f572676fcaf587f75.asciidoc000066400000000000000000000005711506101734100264560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/variablewidthhistogram-aggregation.asciidoc:18 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "prices": { "variable_width_histogram": { "field": "price", "buckets": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5ddc26da6e163fda54f52d33b5157051.asciidoc000066400000000000000000000005451506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/search.asciidoc:9 [source, python] ---- resp = client.search( index="my-index", query={ "sparse_vector": { "field": "my_tokens", "inference_id": "my-elser-endpoint", "query": "the query string" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5deeed427f35cbaee4b8ddc45002a9d7.asciidoc000066400000000000000000000003741506101734100270620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-delete-roles.asciidoc:77 [source, python] ---- resp = client.security.bulk_delete_role( names=[ "my_admin_role", "not_an_existing_role" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5df3226fdc8f1f66ae92ba2f527af8c0.asciidoc000066400000000000000000000003361506101734100267260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:52 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "my_field": 5 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5dfb23f6e36ef484f1d3271bae76a8d1.asciidoc000066400000000000000000000002461506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/recovery.asciidoc:240 [source, python] ---- resp = client.indices.recovery( human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5dfe24287bb930ad33345caf092a004b.asciidoc000066400000000000000000000004751506101734100264620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/exists-query.asciidoc:56 [source, python] ---- resp = client.search( query={ "bool": { "must_not": { "exists": { "field": "user.id" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e021307d331a4483a5aa2198168451b.asciidoc000066400000000000000000000013111506101734100260670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-roles.asciidoc:189 [source, python] ---- resp = client.security.put_role( name="only_remote_access_role", remote_indices=[ { "clusters": [ "my_remote" ], "names": [ "logs*" ], "privileges": [ "read", "read_cross_cluster", "view_index_metadata" ] } ], remote_cluster=[ { "clusters": [ "my_remote" ], "privileges": [ "monitor_stats" ] } ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/5e099493f135ff7bd614e935c4f2bf5a.asciidoc000066400000000000000000000005161506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shard-request-cache.asciidoc:88 [source, python] ---- resp = client.search( index="my-index-000001", request_cache=True, size=0, aggs={ "popular_colors": { "terms": { "field": "colors" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e124875d97c27362ae858160ae1c6d5.asciidoc000066400000000000000000000002731506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:50 [source, python] ---- resp = client.ccr.get_auto_follow_pattern() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e21dbac92f34d236a8f0cc0d3a39cdd.asciidoc000066400000000000000000000016451506101734100267660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/jwt-realm.asciidoc:411 [source, python] ---- resp = client.security.put_role_mapping( name="jwt1_users", refresh=True, roles=[ "user" ], rules={ "all": [ { "field": { "realm.name": "jwt1" } }, { "field": { "username": "principalname1" } }, { "field": { "dn": "CN=Principal Name 1,DC=example.com" } }, { "field": { "groups": "group1" } }, { "field": { "metadata.jwt_claim_other": "other1" } } ] }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e2f7097eb299de553d0fa0087d70a59.asciidoc000066400000000000000000000012471506101734100264400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:748 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "sort.field": [ "username", "timestamp" ], "sort.order": [ "asc", "desc" ] } }, mappings={ "properties": { "username": { "type": "keyword", "doc_values": True }, "timestamp": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e3673bcbef5731746e400c4f3fe134d.asciidoc000066400000000000000000000010631506101734100264760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-shape-query.asciidoc:262 [source, python] ---- resp = client.index( index="test", id="1", document={ "location": [ { "coordinates": [ 46.25, 20.14 ], "type": "point" }, { "coordinates": [ 47.49, 19.04 ], "type": "point" } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e415c490a46358643ee2aab554b4876.asciidoc000066400000000000000000000005361506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:63 [source, python] ---- resp = client.cluster.allocation_explain( filter_path="index,node_allocation_decisions.node_name,node_allocation_decisions.deciders.*", index="my-index", shard=0, primary=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e47a407b6ca29dadf6eac5ab1d71163.asciidoc000066400000000000000000000016301506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-polygon-query.asciidoc:12 [source, python] ---- resp = client.search( query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_polygon": { "person.location": { "points": [ { "lat": 40, "lon": -70 }, { "lat": 30, "lon": -80 }, { "lat": 20, "lon": -90 } ] } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/5e6419bc3e2db0d0f05bce58d8cc9215.asciidoc000066400000000000000000000015001506101734100266260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:669 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "rename": { "description": "Rename 'provider' to 'cloud.provider'", "field": "provider", "target_field": "cloud.provider", "on_failure": [ { "set": { "description": "Set 'error.message'", "field": "error.message", "value": "Field 'provider' does not exist. Cannot rename to 'cloud.provider'", "override": False } } ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e87dd38ac3a0fd59ad794005b16d13e.asciidoc000066400000000000000000000003101506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:353 [source, python] ---- resp = client.slm.get_lifecycle( policy_id="nightly-snapshots", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5e9a7845e60b79685aab59877c5fbd1a.asciidoc000066400000000000000000000004301506101734100265250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/ignored-field.asciidoc:51 [source, python] ---- resp = client.search( aggs={ "ignored_fields": { "terms": { "field": "_ignored" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5ea9da129ca70a5fe534f27a82d80b29.asciidoc000066400000000000000000000005471506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:681 [source, python] ---- resp = client.indices.create( index="example", mappings={ "properties": { "comment": { "type": "text", "index_options": "offsets" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f031b7bd2b7d98d2d10df7420d269ff.asciidoc000066400000000000000000000003211506101734100265530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:407 [source, python] ---- resp = client.indices.resolve_index( name="new-data-stream*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f16358ebb5d14b86f57612d5f92d923.asciidoc000066400000000000000000000005121506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/semantic-text.asciidoc:26 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "inference_field": { "type": "semantic_text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f1ed9cfdc149763b444acfbe10b0e16.asciidoc000066400000000000000000000005311506101734100267070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:271 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "user_id": { "type": "keyword", "ignore_above": 20 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f3373887e8d3dc31239b687a5151449.asciidoc000066400000000000000000000012471506101734100261460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/coerce.asciidoc:19 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "number_one": { "type": "integer" }, "number_two": { "type": "integer", "coerce": False } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", 
document={ "number_one": "10" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "number_two": "10" }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/5f3549ac7fee94682ca0d7439eebdd2a.asciidoc000066400000000000000000000005041506101734100267330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:235 [source, python] ---- resp = client.search( index="index_long,index_double", sort=[ { "field": { "numeric_type": "date_nanos" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f72ab800c3db9d118df95e2a378d411.asciidoc000066400000000000000000000003351506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/alias-privileges.asciidoc:59 [source, python] ---- resp = client.get( index=".ds-my-data-stream-2099.03.09-000003", id="2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f7b59d4fad0bdce6b09abb520ddb51d.asciidoc000066400000000000000000000023071506101734100271240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/use-elasticsearch-for-time-series-data.asciidoc:101 [source, python] ---- resp = client.search( index="my-data-stream", runtime_mappings={ "source.ip": { "type": "ip", "script": "\n String sourceip=grok('%{IPORHOST:sourceip} .*').extract(doc[ \"message\" ].value)?.sourceip;\n if (sourceip != null) emit(sourceip);\n " } }, query={ "bool": { "filter": [ { "range": { "@timestamp": { "gte": "now-1d/d", "lt": "now/d" } } }, { "range": { "source.ip": { "gte": "192.0.2.0", "lte": "192.0.2.255" } } } ] } }, fields=[ "*" ], source=False, sort=[ { "@timestamp": "desc" }, { "source.ip": "desc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f8acd1e367b048b5542dbc6079bcc88.asciidoc000066400000000000000000000016311506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc:144 [source, python] ---- resp = client.indices.create( index="hyphenation_decompound_example", settings={ "analysis": { "analyzer": { "standard_hyphenation_decompound": { "tokenizer": "standard", "filter": [ "22_char_hyphenation_decompound" ] } }, "filter": { "22_char_hyphenation_decompound": { "type": "hyphenation_decompounder", "word_list_path": "analysis/example_word_list.txt", "hyphenation_patterns_path": "analysis/hyphenation_patterns.xml", "max_subword_size": 22 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5f8fb5513d4f725434db2f517ad4298f.asciidoc000066400000000000000000000015531506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/similarity.asciidoc:359 [source, python] ---- resp = client.indices.create( index="index", settings={ "number_of_shards": 1, "similarity": { "scripted_tfidf": { "type": "scripted", "weight_script": { "source": "double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; return query.boost * idf;" }, "script": { "source": "double tf = Math.sqrt(doc.freq); double norm = 1/Math.sqrt(doc.length); return weight * tf * norm;" } } } }, mappings={ "properties": { "field": { "type": "text", "similarity": "scripted_tfidf" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5faa121e00a0582160b2adb2b72fed67.asciidoc000066400000000000000000000003211506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
indices/get-settings.asciidoc:98 [source, python] ---- resp = client.indices.get_settings( index="log_2099_-*", name="index.number_*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5fca6671bc8eaddc44ac488d1c3c6909.asciidoc000066400000000000000000000003161506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-calendar.asciidoc:95 [source, python] ---- resp = client.ml.get_calendars( calendar_id="planned-outages", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5fd002a018c589eb73fadad25889dbe9.asciidoc000066400000000000000000000031361506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-using-query-rules.asciidoc:122 [source, python] ---- resp = client.query_rules.put_ruleset( ruleset_id="my-ruleset", rules=[ { "rule_id": "rule1", "type": "pinned", "criteria": [ { "type": "fuzzy", "metadata": "query_string", "values": [ "puggles", "pugs" ] }, { "type": "exact", "metadata": "user_country", "values": [ "us" ] } ], "actions": { "ids": [ "id1", "id2" ] } }, { "rule_id": "rule2", "type": "exclude", "criteria": [ { "type": "contains", "metadata": "query_string", "values": [ "beagles" ] } ], "actions": { "docs": [ { "_index": "my-index-000001", "_id": "id3" }, { "_index": "my-index-000002", "_id": "id4" } ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5fde0d78e9b2cc0519f8a63848ed344e.asciidoc000066400000000000000000000003141506101734100265770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-rules/apis/get-query-ruleset.asciidoc:108 [source, python] ---- resp = client.query_rules.get_ruleset( ruleset_id="my-ruleset", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/5ffe6fd303400e8678fa1ead291e237f.asciidoc000066400000000000000000000005221506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-aggregation.asciidoc:30 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_outlier": { "percentiles": { "field": "load_time" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/600d33c80f8872dda85c87ed41da95fd.asciidoc000066400000000000000000000010611506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:343 [source, python] ---- resp = client.search( index="azure-ai-studio-embeddings", knn={ "field": "content_embedding", "query_vector_builder": { "text_embedding": { "model_id": "azure_ai_studio_embeddings", "model_text": "Calculate fuel cost" } }, "k": 10, "num_candidates": 100 }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6013ed65d2058da5ce704b47a504b60a.asciidoc000066400000000000000000000016171506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:222 [source, python] ---- resp = client.bulk( index="test", refresh=True, operations=[ { "index": {} }, { "s": 1, "m": 3.1415 }, { "index": {} }, { "s": 2, "m": 1 }, { "index": {} }, { "s": 3, "m": 2.71828 } ], ) print(resp) resp1 = client.search( index="test", filter_path="aggregations", aggs={ "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "s": "desc" }, "size": 3 } } }, ) print(resp1) ---- 
python-elasticsearch-9.1.1/docs/examples/601ad3b0ceccb3fcd282e5ec36748954.asciidoc000066400000000000000000000003631506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-service-credentials.asciidoc:64 [source, python] ---- resp = client.security.get_service_credentials( namespace="elastic", service="fleet-server", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/60299454aa19fec15a604a0dd06fe522.asciidoc000066400000000000000000000004461506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/increase-other-node-capacity.asciidoc:27 [source, python] ---- resp = client.cluster.get_settings( include_defaults=True, filter_path="*.cluster.routing.allocation.disk.watermark.high*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/602e04051c092cf77de2f75a563661b8.asciidoc000066400000000000000000000002211506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat.asciidoc:63 [source, python] ---- resp = client.cat.master( help=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/604da59fe41160efa10a846a9dacc07a.asciidoc000066400000000000000000000003561506101734100266230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/get-async-eql-status-api.asciidoc:25 [source, python] ---- resp = client.eql.get_status( id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6061aadb3b870791278212d1e8f52b39.asciidoc000066400000000000000000000002651506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/common/apis/get-ml-memory.asciidoc:234 [source, python] ---- resp = client.ml.get_memory_stats( human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/608cadc6b8a3f194612b69279ccc96de.asciidoc000066400000000000000000000024261506101734100266000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:728 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "index1" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"script_score\": {\n \"query\": {\n \"bool\": {\n \"filter\": {\n \"range\": {\n \"{{field}}\": {\n \"{{operator}}\": {{value}}\n }\n }\n }\n }\n },\n \"script\": {\n \"source\": \"cosineSimilarity({{#toJson}}query_vector{{/toJson}}, '{{dense_vector_field}}') + 1.0\"\n }\n }\n }\n }\n ", "params": { "field": "price", "operator": "gte", "value": 1000, "dense_vector_field": "product-vector", "query_vector": [] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6097ae69c64454a92a89ef01b994e9f9.asciidoc000066400000000000000000000004061506101734100264020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/put-synonym-rule.asciidoc:151 [source, python] ---- resp = client.synonyms.put_synonym_rule( set_id="my-synonyms-set", rule_id="test-1", synonyms="hello => hi => howdy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/60a9aa5dcde9023901f6ff27231a10c4.asciidoc000066400000000000000000000010471506101734100264570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significanttext-aggregation.asciidoc:417 [source, python] ---- resp = client.search( index="news", query={ 
"match": { "content": "madrid" } }, aggs={ "tags": { "significant_text": { "field": "content", "background_filter": { "term": { "content": "spain" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/60b0fc1b6ae418621ff1b31591fa1fce.asciidoc000066400000000000000000000003341506101734100266140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/example-watches/example-watch-clusterstatus.asciidoc:280 [source, python] ---- resp = client.watcher.delete_watch( id="cluster_health_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/60cab62af1540db2ad3b696b0ee1d7a8.asciidoc000066400000000000000000000005311506101734100266720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:165 [source, python] ---- resp = client.search( index="queries", query={ "percolate": { "field": "query", "document": { "body": "fox jumps over the lazy dog" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/60d3f9a99cc91b43aaa7524a9a74dba0.asciidoc000066400000000000000000000003051506101734100266260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/rejected-requests.asciidoc:50 [source, python] ---- resp = client.nodes.stats( metric="breaker", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/60f889fbed5df3185444f7015b48ed76.asciidoc000066400000000000000000000002641506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:16 [source, python] ---- resp = client.indices.create( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/610f629d0486a64546d62402a0a5e00f.asciidoc000066400000000000000000000005171506101734100261630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-syntax.asciidoc:296 [source, python] ---- resp = client.search( index="my-index-000001", query={ "query_string": { "query": "kimchy\\!", "fields": [ "user.id" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/612c2e975f833de9815651135735eae5.asciidoc000066400000000000000000000003041506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:253 [source, python] ---- resp = client.tasks.cancel( nodes="nodeId1,nodeId2", actions="*reindex", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/615dc36f0978c676624fb7d1144b4899.asciidoc000066400000000000000000000003451506101734100262310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/apis/get-lifecycle-stats.asciidoc:69 [source, python] ---- resp = client.indices.get_data_lifecycle_stats( human=True, pretty=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/618c9d42284c067891fb57034a4fd834.asciidoc000066400000000000000000000002531506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/start-job.asciidoc:56 [source, python] ---- resp = client.rollup.start_job( id="sensor", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/61bf6ac15ae3e22323454a9a2872a2fa.asciidoc000066400000000000000000000005071506101734100264560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cardinality-aggregation.asciidoc:13 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "type_count": { "cardinality": { "field": "type" } } }, ) 
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/61c49cee90c6aa0eafbdd5cc03936e7d.asciidoc000066400000000000000000000003301506101734100270500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic-mapping.asciidoc:11 [source, python] ---- resp = client.index( index="data", id="1", document={ "count": 5 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/61d6b9503459914c436930c3ae87d454.asciidoc000066400000000000000000000003201506101734100261250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-rules/apis/list-query-rulesets.asciidoc:171 [source, python] ---- resp = client.query_rules.list_rulesets( from_="0", size="3", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/61e38e95191f4dde791070c6fce8a092.asciidoc000066400000000000000000000014371506101734100264420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:546 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movavg": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.holt(values, 0.3, 0.1)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/621665fdbd7fc103c09bfeed28b67b1a.asciidoc000066400000000000000000000002561506101734100267160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:150 [source, python] ---- resp = client.count( filter_path="-_shards", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/621f4553e24592d40c8cdbbdfaeb027e.asciidoc000066400000000000000000000010061506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:387 [source, python] ---- resp = client.search( index="image-index", knn={ "field": "image-vector", "query_vector": [ 54, 10, -2 ], "k": 5, "num_candidates": 50, "filter": { "term": { "file-type": "png" } } }, fields=[ "title" ], source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6220087321e6d288024a70c6b09bd720.asciidoc000066400000000000000000000005151506101734100261000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:358 [source, python] ---- resp = client.index( index="my-index-000001", id="4", refresh=True, document={ "query": { "match": { "message": "lazy dog" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6244204213f60edf2f23295f9059f2c9.asciidoc000066400000000000000000000002661506101734100262070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/stats.asciidoc:169 [source, python] ---- resp = client.watcher.stats( metric="queued_watches", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/624e69dedf42c4877234b87ec1d00068.asciidoc000066400000000000000000000003561506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/snapshot/repeated-snapshot-failures.asciidoc:105 [source, python] ---- resp = client.slm.get_lifecycle( policy_id="<affected-policy-name>", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/62c311e7ab4de8b79e532929a5069975.asciidoc000066400000000000000000000027261506101734100263060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT //
mapping/types/rank-features.asciidoc:16 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "topics": { "type": "rank_features" }, "negative_reviews": { "type": "rank_features", "positive_score_impact": False } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "topics": { "politics": 20, "economics": 50.8 }, "negative_reviews": { "1star": 10, "2star": 100 } }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "topics": { "politics": 5.2, "sports": 80.1 }, "negative_reviews": { "1star": 1, "2star": 10 } }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "rank_feature": { "field": "topics.politics" } }, ) print(resp3) resp4 = client.search( index="my-index-000001", query={ "rank_feature": { "field": "negative_reviews.1star" } }, ) print(resp4) resp5 = client.search( index="my-index-000001", query={ "term": { "topics": "economics" } }, ) print(resp5) ---- python-elasticsearch-9.1.1/docs/examples/62ccee6ad356428c2d625742f961ceb7.asciidoc000066400000000000000000000003451506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/update-api-key.asciidoc:206 [source, python] ---- resp = client.security.update_api_key( id="VuaCfGcBCdbkQm-e5aOx", role_descriptors={}, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/62d3c8fccb11471bdc12555c1a7777f2.asciidoc000066400000000000000000000005221506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/synthetic-source.asciidoc:93 [source, python] ---- resp = client.index( index="idx", id="1", document={ "foo": [ { "bar": 1 }, { "baz": 2 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/62eafc5b3ab75cc67314d5a8567d6077.asciidoc000066400000000000000000000002771506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:231 [source, python] ---- resp = client.security.get_api_key( username="myuser", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/62f1ec1bb5cc5a9c2efd536a7474f549.asciidoc000066400000000000000000000005241506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/hunspell-tokenfilter.asciidoc:73 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "hunspell", "locale": "en_US" } ], text="the foxes jumping quickly", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/630d127ccedd25a6cff31ea098ac2847.asciidoc000066400000000000000000000015061506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/t-test-aggregation.asciidoc:86 [source, python] ---- resp = client.search( index="node_upgrade", size=0, aggs={ "startup_time_ttest": { "t_test": { "a": { "field": "startup_time_before", "filter": { "term": { "group": "A" } } }, "b": { "field": "startup_time_before", "filter": { "term": { "group": "B" } } }, "type": "heteroscedastic" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6326f5c6fd2a6e6b1aff9a643b94f455.asciidoc000066400000000000000000000015771506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/fields.asciidoc:50 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "text": "quick brown fox", "popularity": 1 }, ) 
print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "text": "quick fox", "popularity": 5 }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "function_score": { "query": { "match": { "text": "quick brown fox" } }, "script_score": { "script": { "lang": "expression", "source": "_score * doc['popularity']" } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/6329fb2840a4373ff6d342f2653247cb.asciidoc000066400000000000000000000002661506101734100262650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:299 [source, python] ---- resp = client.indices.get_mapping( index="books", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/634ecacf14b83c5f0bb8b6273cf6418e.asciidoc000066400000000000000000000035071506101734100266430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-security.asciidoc:128 [source, python] ---- resp = client.search_application.put( name="website-product-search", search_application={ "indices": [ "website-products" ], "template": { "script": { "source": { "query": { "term": { "{{field_name}}": "{{field_value}}" } }, "aggs": { "color_facet": { "terms": { "field": "color", "size": "{{agg_size}}" } } } }, "params": { "field_name": "product_name", "field_value": "hello world", "agg_size": 5 } }, "dictionary": { "properties": { "field_name": { "type": "string", "enum": [ "name", "color", "description" ] }, "field_value": { "type": "string" }, "agg_size": { "type": "integer", "minimum": 1, "maximum": 10 } }, "required": [ "field_name" ], "additionalProperties": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63521e0089c631d6668c44a0a9d7fdcc.asciidoc000066400000000000000000000012671506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc:123 [source, python] ---- resp = client.indices.create( index="custom_limit_example", settings={ "analysis": { "analyzer": { "whitespace_five_token_limit": { "tokenizer": "whitespace", "filter": [ "five_token_limit" ] } }, "filter": { "five_token_limit": { "type": "limit", "max_token_count": 5 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6352e846bb83725ae6d853aa64d8697d.asciidoc000066400000000000000000000010521506101734100263620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:158 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_distance": { "distance": "12km", "pin.location": { "lat": 40, "lon": -70 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6365312d470426cab1b77e9ffde49170.asciidoc000066400000000000000000000006651506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/document-level-security.asciidoc:30 [source, python] ---- resp = client.security.put_role( name="click_role", indices=[ { "names": [ "events-*" ], "privileges": [ "read" ], "query": "{\"match\": {\"category\": \"click\"}}" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/636ee2066450605247ec1f68d04b8ee4.asciidoc000066400000000000000000000004431506101734100262640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1465 [source, python] ---- resp = 
client.search( index="my-index-000001", query={ "match": { "http.clientip": "40.135.0.0" } }, fields=[ "*" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63893e7e9479a9b60db71dcddcc79aaf.asciidoc000066400000000000000000000003231506101734100267440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-calendar.asciidoc:44 [source, python] ---- resp = client.ml.delete_calendar( calendar_id="planned-outages", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63a53fcb0717ae9033a679cbfc932851.asciidoc000066400000000000000000000010651506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-alibabacloud-ai-search.asciidoc:174 [source, python] ---- resp = client.inference.put( task_type="completion", inference_id="alibabacloud_ai_search_completion", inference_config={ "service": "alibabacloud-ai-search", "service_settings": { "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", "api_key": "{{API_KEY}}", "service_id": "ops-qwen-turbo", "workspace": "default" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63bf3480627a89b4b4ede4150e1d6bc0.asciidoc000066400000000000000000000045311506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-create-roles.asciidoc:125 [source, python] ---- resp = client.security.bulk_put_role( roles={ "my_admin_role": { "cluster": [ "all" ], "indices": [ { "names": [ "index1", "index2" ], "privileges": [ "all" ], "field_security": { "grant": [ "title", "body" ] }, "query": "{\"match\": {\"title\": \"foo\"}}" } ], "applications": [ { "application": "myapp", "privileges": [ "admin", "read" ], "resources": [ "*" ] } ], "run_as": [ "other_user" ], "metadata": { "version": 1 } }, "my_user_role": { "cluster": [ "all" ], "indices": [ { "names": [ "index1" ], "privileges": [ "read" ], "field_security": { "grant": [ "title", "body" ] }, "query": "{\"match\": {\"title\": \"foo\"}}" } ], "applications": [ { "application": "myapp", "privileges": [ "admin", "read" ], "resources": [ "*" ] } ], "run_as": [ "other_user" ], "metadata": { "version": 1 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63cc960215ae83b359c12df3c0993bfa.asciidoc000066400000000000000000000004571506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:136 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "number_of_shards": 3, "number_of_replicas": 2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63e20883732ec30b5400046be2efb0f1.asciidoc000066400000000000000000000002551506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/flush.asciidoc:127 [source, python] ---- resp = client.indices.flush( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/63ecdab34940af053acc409164914c32.asciidoc000066400000000000000000000033531506101734100264020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/sparse-vector.asciidoc:63 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "text": { "type": "text", "analyzer": "standard" }, "impact": { "type": "sparse_vector" }, "positive": { "type": "sparse_vector" }, "negative": { "type": "sparse_vector" } } }, ) print(resp) resp1 = client.index( 
index="my-index-000001", document={ "text": "I had some terribly delicious carrots.", "impact": [ { "I": 0.55, "had": 0.4, "some": 0.28, "terribly": 0.01, "delicious": 1.2, "carrots": 0.8 }, { "I": 0.54, "had": 0.4, "some": 0.28, "terribly": 2.01, "delicious": 0.02, "carrots": 0.4 } ], "positive": { "I": 0.55, "had": 0.4, "some": 0.28, "terribly": 0.01, "delicious": 1.2, "carrots": 0.8 }, "negative": { "I": 0.54, "had": 0.4, "some": 0.28, "terribly": 2.01, "delicious": 0.02, "carrots": 0.4 } }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "term": { "impact": { "value": "delicious" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/640621cea39cdeeb76fbc95bff31a18d.asciidoc000066400000000000000000000013211506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-last-sync-api.asciidoc:122 [source, python] ---- resp = client.connector.last_sync( connector_id="my-connector", last_access_control_sync_error="Houston, we have a problem!", last_access_control_sync_scheduled_at="2023-11-09T15:13:08.231Z", last_access_control_sync_status="pending", last_deleted_document_count=42, last_incremental_sync_scheduled_at="2023-11-09T15:13:08.231Z", last_indexed_document_count=42, last_sync_error="Houston, we have a problem!", last_sync_scheduled_at="2024-11-09T15:13:08.231Z", last_sync_status="completed", last_synced="2024-11-09T15:13:08.231Z", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/640a89d0b39630269433425ff476faf3.asciidoc000066400000000000000000000003251506101734100262100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // upgrade/archived-settings.asciidoc:32 [source, python] ---- resp = client.cluster.put_settings( persistent={ "archived.*": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/640da6dd719a34975b5627dfa5fcdd55.asciidoc000066400000000000000000000003671506101734100266000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:487 [source, python] ---- resp = client.cluster.put_settings( persistent={ "xpack.monitoring.collection.enabled": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/640dbeecb736bd25f6f2b392b76a7531.asciidoc000066400000000000000000000002531506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/stats.asciidoc:1914 [source, python] ---- resp = client.cluster.stats( include_remotes=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/640e4f2c2d29f9851320a70927bd7a6c.asciidoc000066400000000000000000000003571506101734100263450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-with-existing-indices.asciidoc:185 [source, python] ---- resp = client.cluster.put_settings( persistent={ "indices.lifecycle.poll_interval": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/641009f2147e1ca56215c701f45c970b.asciidoc000066400000000000000000000010261506101734100261610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geotilegrid-aggregation.asciidoc:185 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "tiles-in-bounds": { "geotile_grid": { "field": "location", "precision": 22, "bounds": { "top_left": "POINT (4.9 52.4)", "bottom_right": "POINT (5.0 52.3)" } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/6414b9276ba1c63898c3ff5cbe03c54e.asciidoc000066400000000000000000000002251506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/segments.asciidoc:134 [source, python] ---- resp = client.indices.segments() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/641f75862c70e25e79d249d9e0a79f03.asciidoc000066400000000000000000000015061506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/nested-query.asciidoc:41 [source, python] ---- resp = client.search( index="my-index-000001", query={ "nested": { "path": "obj1", "query": { "bool": { "must": [ { "match": { "obj1.name": "blue" } }, { "range": { "obj1.count": { "gt": 5 } } } ] } }, "score_mode": "avg" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/642161d70dacf7d153767d37d3726838.asciidoc000066400000000000000000000003051506101734100262100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-index-caps.asciidoc:171 [source, python] ---- resp = client.rollup.get_rollup_index_caps( index="*_rollup", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/642c0c1c76e9bf226cd216ebae9ab958.asciidoc000066400000000000000000000022041506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keep-words-tokenfilter.asciidoc:118 [source, python] ---- resp = client.indices.create( index="keep_words_example", settings={ "analysis": { "analyzer": { "standard_keep_word_array": { "tokenizer": "standard", "filter": [ "keep_word_array" ] }, "standard_keep_word_file": { "tokenizer": "standard", "filter": [ "keep_word_file" ] } }, "filter": { "keep_word_array": { "type": "keep", "keep_words": [ "one", "two", "three" ] }, "keep_word_file": { "type": "keep", "keep_words_path": "analysis/example_word_list.txt" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/643b9506d1129d5215f9a1bb0b509aba.asciidoc000066400000000000000000000014141506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:316 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "full_name": { "path_match": "name.*", "path_unmatch": "*.middle", "mapping": { "type": "text", "copy_to": "full_name" } } } ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "name": { "first": "John", "middle": "Winston", "last": "Lennon" } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/643e19c3b6ac1134554dd890e2249c2b.asciidoc000066400000000000000000000005521506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/logs.asciidoc:20 [source, python] ---- resp = client.indices.put_index_template( name="my-index-template", index_patterns=[ "logs-*" ], data_stream={}, template={ "settings": { "index.mode": "logsdb" } }, priority=101, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/645433e8e479e5d71c100f66dd2de5d0.asciidoc000066400000000000000000000444041506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:256 [source, python] ---- resp = client.bulk( index="my-data-stream", refresh=True, pipeline="my-timestamp-pipeline", operations=[ { "create": {} }, { "@timestamp": "2022-06-21T15:49:00Z", "kubernetes": { "host": "gke-apps-0", "node": 
"gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 91153, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 463314616 }, "usage": { "bytes": 307007078, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 585236 }, "rss": { "bytes": 102728 }, "pagefaults": 120901, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:45:50Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 124501, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 982546514 }, "usage": { "bytes": 360035574, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1339884 }, "rss": { "bytes": 381174 }, "pagefaults": 178473, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:44:50Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 38907, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 862723768 }, "usage": { "bytes": 379572388, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 431227 }, "rss": { "bytes": 386580 }, "pagefaults": 233166, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:44:40Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 86706, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 567160996 }, "usage": { "bytes": 103266017, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1724908 }, "rss": { "bytes": 105431 }, "pagefaults": 233166, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:44:00Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 150069, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 639054643 }, "usage": { "bytes": 265142477, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1786511 }, "rss": { "bytes": 189235 }, "pagefaults": 138172, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:42:40Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 82260, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, 
"limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 854735585 }, "usage": { "bytes": 309798052, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 924058 }, "rss": { "bytes": 110838 }, "pagefaults": 259073, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:42:10Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 153404, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 279586406 }, "usage": { "bytes": 214904955, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1047265 }, "rss": { "bytes": 91914 }, "pagefaults": 302252, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:40:20Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 125613, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 822782853 }, "usage": { "bytes": 100475044, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 2109932 }, "rss": { "bytes": 278446 }, "pagefaults": 74843, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:40:10Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 100046, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 567160996 }, "usage": { "bytes": 362826547, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 1986724 }, "rss": { "bytes": 402801 }, "pagefaults": 296495, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } }, { "create": {} }, { "@timestamp": "2022-06-21T15:38:30Z", "kubernetes": { "host": "gke-apps-0", "node": "gke-apps-0-0", "pod": "gke-apps-0-0-0", "container": { "cpu": { "usage": { "nanocores": 40018, "core": { "ns": 12828317850 }, "node": { "pct": 0.0000277905 }, "limit": { "pct": 0.0000277905 } } }, "memory": { "available": { "bytes": 1062428344 }, "usage": { "bytes": 265142477, "node": { "pct": 0.01770037710617187 }, "limit": { "pct": 0.00009923134671484496 } }, "workingset": { "bytes": 2294743 }, "rss": { "bytes": 340623 }, "pagefaults": 224530, "majorpagefaults": 0 }, "start_time": "2021-03-30T07:59:06Z", "name": "container-name-44" }, "namespace": "namespace26" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64622409407316d2d47094e692d9b516.asciidoc000066400000000000000000000012141506101734100257650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:401 [source, python] ---- resp = client.ml.evaluate_data_frame( index="student_performance_mathematics_reg", query={ "term": { 
"ml.is_training": { "value": False } } }, evaluation={ "regression": { "actual_field": "G3", "predicted_field": "ml.G3_prediction", "metrics": { "r_squared": {}, "mse": {}, "msle": {}, "huber": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6464124d1677f4552ddddd95a340ca3a.asciidoc000066400000000000000000000012451506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:196 [source, python] ---- resp = client.index( index="library", refresh=True, document={ "title": "Book #1", "rating": 200.1 }, ) print(resp) resp1 = client.index( index="library", refresh=True, document={ "title": "Book #2", "rating": 1.7 }, ) print(resp1) resp2 = client.index( index="library", refresh=True, document={ "title": "Book #3", "rating": 0.1 }, ) print(resp2) resp3 = client.search( filter_path="hits.hits._source", source="title", sort="rating:desc", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/646d71869f1a18c5bede7759559bfc47.asciidoc000066400000000000000000000003201506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:242 [source, python] ---- resp = client.indices.get_field_mapping( index="_all", fields="message", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6490d89a4e43cac5e6b9bc19840d5478.asciidoc000066400000000000000000000004111506101734100264340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/fingerprint-analyzer.asciidoc:19 [source, python] ---- resp = client.indices.analyze( analyzer="fingerprint", text="Yes yes, Gödel said this sentence is consistent and.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64a6fb4bcb8cfea139a0e5d3765c063a.asciidoc000066400000000000000000000003471506101734100267130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/translate.asciidoc:9 [source, python] ---- resp = client.sql.translate( query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=10, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64a79861225553799b26e118d7851dcc.asciidoc000066400000000000000000000002711506101734100261430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/error-handling.asciidoc:61 [source, python] ---- resp = client.ilm.explain_lifecycle( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64aff98cf477555e7411714c17006572.asciidoc000066400000000000000000000004541506101734100261400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/range-query.asciidoc:150 [source, python] ---- resp = client.search( query={ "range": { "timestamp": { "gte": "now-1d/d", "lte": "now/d" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64c572abc23394a77b6cca0b5368ee1d.asciidoc000066400000000000000000000002471506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // features/apis/get-features-api.asciidoc:18 [source, python] ---- resp = client.features.get_features() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64c804869ddfbcb9075817d0bbf71b5c.asciidoc000066400000000000000000000010451506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:592 [source, python] ---- resp = client.search_application.search( name="my_search_application", params={ "elser": True, 
"query_string": "where is the best mountain climbing?", "elser_fields": [ { "name": "title", "boost": 1 }, { "name": "description", "boost": 1 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64ca2ccb79a8f4add5b8fe2d3322ae92.asciidoc000066400000000000000000000004671506101734100270040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/avg-aggregation.asciidoc:13 [source, python] ---- resp = client.search( index="exams", size="0", aggs={ "avg_grade": { "avg": { "field": "grade" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64d24f4b2a57dba48092dafe3eb68ad1.asciidoc000066400000000000000000000006071506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-get.asciidoc:245 [source, python] ---- resp = client.mget( index="test", stored_fields="field1,field2", docs=[ { "_id": "1" }, { "_id": "2", "stored_fields": [ "field3", "field4" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/64ffaa6814ec1ec4f59b8f33b47cffb4.asciidoc000066400000000000000000000003511506101734100270110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // upgrade/archived-settings.asciidoc:73 [source, python] ---- resp = client.indices.put_settings( index="my-index", settings={ "archived.*": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/650a0fb27c66a790c4687267423af1da.asciidoc000066400000000000000000000006731506101734100263420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:104 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "remove": { "index": "logs-nginx.access-prod", "alias": "logs" } }, { "add": { "index": "logs-my_app-default", "alias": "logs" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6521c3578dc4ad4a6db697700986e78e.asciidoc000066400000000000000000000016651506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:315 [source, python] ---- resp = client.search( index="place", pretty=True, suggest={ "place_suggestion": { "prefix": "tim", "completion": { "field": "suggest", "size": 10, "contexts": { "location": [ { "lat": 43.6624803, "lon": -79.3863353, "precision": 2 }, { "context": { "lat": 43.6624803, "lon": -79.3863353 }, "boost": 2 } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/653c0d0ef146c997ef6bc6450d4f5f94.asciidoc000066400000000000000000000010121506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:700 [source, python] ---- resp = client.search( aggs={ "actors": { "terms": { "field": "actors", "size": 10 }, "aggs": { "costars": { "terms": { "field": "actors", "size": 5 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/654882f545eca8d7047695f867c63072.asciidoc000066400000000000000000000003221506101734100261530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/stop-transform.asciidoc:87 [source, python] ---- resp = client.transform.stop_transform( transform_id="ecommerce_transform", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/65578c390837cb4c0fcc77fb17857714.asciidoc000066400000000000000000000011771506101734100263120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline.asciidoc:92 [source, python] ---- resp = client.search( aggs={ 
"sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "max_monthly_sales": { "max_bucket": { "buckets_path": "sales_per_month>sales" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/657cf67bbc48f3b8c7fa15e275a5ef72.asciidoc000066400000000000000000000006241506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/ignore-missing-component-templates.asciidoc:14 [source, python] ---- resp = client.cluster.put_component_template( name="logs-foo_component1", template={ "mappings": { "properties": { "host.name": { "type": "keyword" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/658842bf41e0fcb7969937155946a0ff.asciidoc000066400000000000000000000006301506101734100263100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:157 [source, python] ---- resp = client.security.put_role( name="slm-read-only", cluster=[ "read_slm" ], indices=[ { "names": [ ".slm-history-*" ], "privileges": [ "read" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/65b6185356f16f2f0d84bc5aee2ed0fc.asciidoc000066400000000000000000000005471506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/sparse-vector-query.asciidoc:26 [source, python] ---- resp = client.search( query={ "sparse_vector": { "field": "ml.tokens", "inference_id": "the inference ID to produce the token weights", "query": "the query string" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/65c671fbecdb5b0d75c13d63f87e36f0.asciidoc000066400000000000000000000013721506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geodistance-aggregation.asciidoc:149 [source, python] ---- resp = client.search( index="museums", size="0", aggs={ "rings_around_amsterdam": { "geo_distance": { "field": "location", "origin": "POINT (4.894 52.3760)", "ranges": [ { "to": 100000 }, { "from": 100000, "to": 300000 }, { "from": 300000 } ], "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6606d46685d10377b996b5f20f1229b5.asciidoc000066400000000000000000000004131506101734100261310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-index-name-api.asciidoc:82 [source, python] ---- resp = client.connector.update_index_name( connector_id="my-connector", index_name="data-from-my-google-drive", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6636701d31b0c9eb8316f1f8e99cc918.asciidoc000066400000000000000000000013501506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/scripted-metric-aggregation.asciidoc:19 [source, python] ---- resp = client.search( index="ledger", size="0", query={ "match_all": {} }, aggs={ "profit": { "scripted_metric": { "init_script": "state.transactions = []", "map_script": "state.transactions.add(doc.type.value == 'sale' ? 
doc.amount.value : -1 * doc.amount.value)", "combine_script": "double profit = 0; for (t in state.transactions) { profit += t } return profit", "reduce_script": "double profit = 0; for (a in states) { profit += a } return profit" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/66539dc6011dd2e0282cf81db1f3df27.asciidoc000066400000000000000000000002431506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat.asciidoc:91 [source, python] ---- resp = client.cat.nodes( h="ip,port,heapPercent,name", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/666c420fe61fa122386da3c356a64943.asciidoc000066400000000000000000000010751506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:602 [source, python] ---- resp = client.search( query={ "term": { "user": "kimchy" } }, sort={ "_script": { "type": "number", "script": { "lang": "painless", "source": "doc['field_name'].value * params.factor", "params": { "factor": 1.1 } }, "order": "asc" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6689aa213884196b47a6f482d4993749.asciidoc000066400000000000000000000007141506101734100261020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/put-pipeline.asciidoc:17 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline-id", description="My optional pipeline description", processors=[ { "set": { "description": "My optional processor description", "field": "my-keyword-field", "value": "foo" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/66915e95b723ee2f6e5164a94b8f98c1.asciidoc000066400000000000000000000003651506101734100263760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/create-index-from-source.asciidoc:85 [source, python] ---- resp = client.indices.create_from( source="my-index", dest="my-new-index", create_from=None, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6693f0ffa0de3229b5dedda197810e70.asciidoc000066400000000000000000000003511506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1368 [source, python] ---- resp = client.eql.get( id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", keep_alive="5d", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/669773766b041be768003055ad523038.asciidoc000066400000000000000000000003351506101734100257620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/alias-privileges.asciidoc:47 [source, python] ---- resp = client.get( index=".ds-my-data-stream-2099.03.08-000002", id="2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6705eca2095ade294548cfb25bf2dd86.asciidoc000066400000000000000000000004171506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/diagnose-unassigned-shards.asciidoc:166 [source, python] ---- resp = client.cat.shards( v=True, h="index,shard,prirep,state,node,unassigned.reason", s="state", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/672d30eb3af573140d966e88b14814f8.asciidoc000066400000000000000000000004341506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/date-index-name.asciidoc:43 [source, python] ---- resp = client.index( index="my-index", id="1", 
pipeline="monthlyindex", document={ "date1": "2016-04-25T12:02:01.789Z" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6742a8cd0b7b4c1c325ce2f22faf6cb4.asciidoc000066400000000000000000000007161506101734100267050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/categorize-text-aggregation.asciidoc:213 [source, python] ---- resp = client.search( index="log-messages", filter_path="aggregations", aggs={ "categories": { "categorize_text": { "field": "message", "categorization_filters": [ "\\w+\\_\\d{3}" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/674bb755111c6fbaa4c5ac759395c122.asciidoc000066400000000000000000000005621506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:132 [source, python] ---- resp = client.indices.get_settings( index="my-index", flat_settings=True, include_defaults=True, ) print(resp) resp1 = client.cluster.get_settings( flat_settings=True, include_defaults=True, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/67967388db610dcb9d24fb59ede348d8.asciidoc000066400000000000000000000004671506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/min-aggregation.asciidoc:17 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "min_price": { "min": { "field": "price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67a1f31cf60773a2378c2c30723c4b96.asciidoc000066400000000000000000000007321506101734100262550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-rank-aggregation.asciidoc:216 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_ranks": { "percentile_ranks": { "field": "load_time", "values": [ 500, 600 ], "missing": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67a490d749a0c3bb16a266663423893d.asciidoc000066400000000000000000000002731506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/getting-started.asciidoc:202 [source, python] ---- resp = client.watcher.delete_watch( id="log_error_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67a55ac3aaee09f4aeeb7d2763da3335.asciidoc000066400000000000000000000035171506101734100267170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geobounds-aggregation.asciidoc:104 [source, python] ---- resp = client.indices.create( index="places", mappings={ "properties": { "geometry": { "type": "geo_shape" } } }, ) print(resp) resp1 = client.bulk( index="places", refresh=True, operations=[ { "index": { "_id": 1 } }, { "name": "NEMO Science Museum", "geometry": "POINT(4.912350 52.374081)" }, { "index": { "_id": 2 } }, { "name": "Sportpark De Weeren", "geometry": { "type": "Polygon", "coordinates": [ [ [ 4.965305328369141, 52.39347642069457 ], [ 4.966979026794433, 52.391721758934835 ], [ 4.969425201416015, 52.39238958618537 ], [ 4.967944622039794, 52.39420969150824 ], [ 4.965305328369141, 52.39347642069457 ] ] ] } } ], ) print(resp1) resp2 = client.search( index="places", size="0", aggs={ "viewport": { "geo_bounds": { "field": "geometry" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/67aac8882fa476db8a5878b67ea08eb3.asciidoc000066400000000000000000000005121506101734100266020ustar00rootroot00000000000000// This 
file is autogenerated, DO NOT EDIT // snapshot-restore/apis/repo-analysis-api.asciidoc:32 [source, python] ---- resp = client.perform_request( "POST", "/_snapshot/my_repository/_analyze", params={ "blob_count": "10", "max_blob_size": "1mb", "timeout": "120s" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67b71a95b6fe6c83faae51ea038a1bf1.asciidoc000066400000000000000000000003521506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:407 [source, python] ---- resp = client.esql.async_query_delete( id="FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67bab07fda27ef77e3bc948211051a33.asciidoc000066400000000000000000000004071506101734100264700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:160 [source, python] ---- resp = client.cat.thread_pool( thread_pool_patterns="write,search", v=True, s="n,nn", h="n,nn,q,a,r,c", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67c3808751223eef69a57e6fd02ddf4f.asciidoc000066400000000000000000000012361506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/mlt-query.asciidoc:38 [source, python] ---- resp = client.search( query={ "more_like_this": { "fields": [ "title", "description" ], "like": [ { "_index": "imdb", "_id": "1" }, { "_index": "imdb", "_id": "2" }, "and potentially some more text here as well" ], "min_term_freq": 1, "max_query_terms": 12 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/67ffa135c50c43d6788636c88078c7d1.asciidoc000066400000000000000000000007561506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/simulate-pipeline.asciidoc:156 [source, python] ---- resp = client.ingest.simulate( id="my-pipeline-id", docs=[ { "_index": "index", "_id": "id", "_source": { "foo": "bar" } }, { "_index": "index", "_id": "id", "_source": { "foo": "rab" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/682336e5232c9ad3d866cb203d1c58c1.asciidoc000066400000000000000000000010371506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:135 [source, python] ---- resp = client.indices.create( index="azure-openai-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 1536, "element_type": "float", "similarity": "dot_product" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6843d859e2965d17cad4f033c81db83f.asciidoc000066400000000000000000000007701506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:351 [source, python] ---- resp = client.indices.put_index_template( name="my-data-stream-template", index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, template={ "settings": { "sort.field": [ "@timestamp" ], "sort.order": [ "desc" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6856f7c6a732ab55ca71c1ee2ec2bbad.asciidoc000066400000000000000000000027341506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/max-aggregation.asciidoc:126 [source, python] ---- resp = client.indices.create( index="metrics_index", mappings={ "properties": { 
"latency_histo": { "type": "histogram" } } }, ) print(resp) resp1 = client.index( index="metrics_index", id="1", refresh=True, document={ "network.name": "net-1", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp1) resp2 = client.index( index="metrics_index", id="2", refresh=True, document={ "network.name": "net-2", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 8, 17, 8, 7, 6 ] } }, ) print(resp2) resp3 = client.search( index="metrics_index", size="0", filter_path="aggregations", aggs={ "max_latency": { "max": { "field": "latency_histo" } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/6859530dd9d85e59bd33a53ec96a3836.asciidoc000066400000000000000000000007521506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/match-enrich-policy-type-ex.asciidoc:20 [source, python] ---- resp = client.index( index="users", id="1", refresh="wait_for", document={ "email": "mardy.brown@asciidocsmith.com", "first_name": "Mardy", "last_name": "Brown", "city": "New Orleans", "county": "Orleans", "state": "LA", "zip": 70116, "web": "mardy.asciidocsmith.com" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/686bc640b877de845c46bef372a9866c.asciidoc000066400000000000000000000015131506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/parent-aggregation.asciidoc:95 [source, python] ---- resp = client.search( index="parent_example", size="0", aggs={ "top-names": { "terms": { "field": "owner.display_name.keyword", "size": 10 }, "aggs": { "to-questions": { "parent": { "type": "answer" }, "aggs": { "top-tags": { "terms": { "field": "tags.keyword", "size": 10 } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/68721288dc9ad8aa1b55099b4d303051.asciidoc000066400000000000000000000005611506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:534 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "quick brown f", "type": "bool_prefix", "fields": [ "subject", "message" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/68738b4fd0dda177022be45be95b4c84.asciidoc000066400000000000000000000003361506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:208 [source, python] ---- resp = client.reindex_rethrottle( task_id="r1A2WoRbTwKZ516z6NEs5A:36619", requests_per_second="-1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6884454f57c3a41059037ea762f48d77.asciidoc000066400000000000000000000004061506101734100261440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/standard-analyzer.asciidoc:17 [source, python] ---- resp = client.indices.analyze( analyzer="standard", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/68a891f609ca3a379d2d64e4914f3067.asciidoc000066400000000000000000000004171506101734100263020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/kstem-tokenfilter.asciidoc:29 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "kstem" ], text="the foxes jumping quickly", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/68b64313bf89ec3f2c645da61999dbb4.asciidoc000066400000000000000000000002511506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-info.asciidoc:226 [source, python] ---- resp = client.nodes.info( node_id="plugins", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/68cb8a452e780ca78b0cb761be3629af.asciidoc000066400000000000000000000004241506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:714 [source, python] ---- resp = client.search( stored_fields="_none_", query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/68d7f7d4d268ee98caead5aef19933d6.asciidoc000066400000000000000000000026751506101734100267670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/tsds-reindex.asciidoc:168 [source, python] ---- resp = client.cluster.put_component_template( name="destination_template", template={ "settings": { "index": { "number_of_replicas": 0, "number_of_shards": 4, "mode": "time_series", "routing_path": [ "metricset" ], "time_series": { "end_time": "2023-09-01T14:00:00.000Z", "start_time": "2023-09-01T06:00:00.000Z" } } }, "mappings": { "properties": { "@timestamp": { "type": "date" }, "metricset": { "type": "keyword", "time_series_dimension": True }, "k8s": { "properties": { "tx": { "type": "long" }, "rx": { "type": "long" } } } } } }, ) print(resp) resp1 = client.indices.put_index_template( name="2", index_patterns=[ "k9s*" ], composed_of=[ "destination_template" ], data_stream={}, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/691fe20d467324ed43a36fd15852c492.asciidoc000066400000000000000000000005001506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:174 [source, python] ---- resp = client.ccr.follow( index="kibana_sample_data_ecommerce", wait_for_active_shards="1", remote_cluster="clusterB", leader_index="kibana_sample_data_ecommerce2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/692606cc6d6462becc321d92961a3bac.asciidoc000066400000000000000000000005031506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // text-structure/apis/test-grok-pattern.asciidoc:60 [source, python] ---- resp = client.text_structure.test_grok_pattern( grok_pattern="Hello %{WORD:first_name} %{WORD:last_name}", text=[ "Hello John Doe", "this does not match" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69541f0bb81ab3797926bb2a00607cda.asciidoc000066400000000000000000000007011506101734100264020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:748 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="my-msmarco-minilm-model", inference_config={ "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1, "model_id": "cross-encoder__ms-marco-minilm-l-6-v2" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69582847099ee62ed34feddfaba83ef6.asciidoc000066400000000000000000000006031506101734100266730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/histogram-aggregation.asciidoc:307 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "quantity": { "histogram": { "field": "quantity", 
"interval": 10, "missing": 0 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/698e0a2b67ba7842caa801d9ef46ebe3.asciidoc000066400000000000000000000010101506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:511 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "require_field_match": False, "fields": { "body": { "pre_tags": [ "" ], "post_tags": [ "" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69a08e7bdcc616f3bdcb8ae842d9e30e.asciidoc000066400000000000000000000003141506101734100270030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:360 [source, python] ---- resp = client.get( index="my-index-000001", id="1", stored_fields="tags,counter", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69ab708fe65a75f870223d2289c3d171.asciidoc000066400000000000000000000013501506101734100262710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/redact.asciidoc:107 [source, python] ---- resp = client.ingest.simulate( pipeline={ "description": "Hide my IP", "processors": [ { "redact": { "field": "message", "patterns": [ "%{IP:REDACTED}", "%{EMAILADDRESS:REDACTED}" ], "prefix": "*", "suffix": "*" } } ] }, docs=[ { "_source": { "message": "55.3.244.1 GET /index.html 15824 0.043 test@elastic.co" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69c07cfdf8054c301cd6186c5d71aa02.asciidoc000066400000000000000000000003061506101734100264650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:350 [source, python] ---- resp = client.update_by_query( index="my-index-000001,my-index-000002", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69d5710bdec73041c66f21d5f96637e8.asciidoc000066400000000000000000000005111506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:216 [source, python] ---- resp = client.indices.create( index="index_long", mappings={ "properties": { "field": { "type": "date_nanos" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69d9b8fd364596aa37eae6864d8a6d89.asciidoc000066400000000000000000000004241506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/getting-started.asciidoc:61 [source, python] ---- resp = client.search( index=".watcher-history*", pretty=True, sort=[ { "result.execution_time": "desc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69daf5ec2a9bc07096e1833286c36076.asciidoc000066400000000000000000000007451506101734100263570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-tutorial.asciidoc:334 [source, python] ---- resp = client.indices.put_index_template( name="timeseries_template", index_patterns=[ "timeseries-*" ], template={ "settings": { "number_of_shards": 1, "number_of_replicas": 1, "index.lifecycle.name": "timeseries_policy", "index.lifecycle.rollover_alias": "timeseries" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/69f8b0f2a9ba47e11f363d788cee9d6d.asciidoc000066400000000000000000000002761506101734100266720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/deprecation.asciidoc:146 [source, python] ---- resp = client.migration.deprecations( index="logstash-*", ) print(resp) 
---- python-elasticsearch-9.1.1/docs/examples/6a1702dd50690cae833572e48a0ddf25.asciidoc000066400000000000000000000005071506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:33 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "Will Smith", "fields": [ "title", "*_name" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a350a17701e8c8158407191f2718b66.asciidoc000066400000000000000000000002721506101734100260410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/post-unfollow.asciidoc:80 [source, python] ---- resp = client.ccr.unfollow( index="follower_index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a3a578ce37fb2c63ccfab7f75db9bae.asciidoc000066400000000000000000000004561506101734100271510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:295 [source, python] ---- resp = client.cluster.put_settings( persistent={ "ingest.geoip.downloader.enabled": False, "indices.lifecycle.history_index_enabled": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a3a86ff58e5f20950d429cf2832c229.asciidoc000066400000000000000000000002711506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/get-pipeline.asciidoc:82 [source, python] ---- resp = client.ingest.get_pipeline( id="my-pipeline-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a3f06962cceb3dfd3cd4fb5c679fa75.asciidoc000066400000000000000000000005031506101734100270100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/mapping-charfilter.asciidoc:141 [source, python] ---- resp = client.indices.analyze( index="my-index-000001", tokenizer="keyword", char_filter=[ "my_mappings_char_filter" ], text="I'm delighted about it :(", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a50c1c53673fe9cc3cbda38a2853cdd.asciidoc000066400000000000000000000003471506101734100267150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:683 [source, python] ---- resp = client.sql.delete_async( id="FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a55dbba114c6c1408474f7e9cfdbb94.asciidoc000066400000000000000000000005551506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/register-repository.asciidoc:167 [source, python] ---- resp = client.snapshot.create_repository( name="my_unverified_backup", verify=False, repository={ "type": "fs", "settings": { "location": "my_unverified_backup_location" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6a9655fe22fa5db7a540c145bcf1fb31.asciidoc000066400000000000000000000011241506101734100266230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/aggregate-metric-double.asciidoc:133 [source, python] ---- resp = client.index( index="stats-index", id="1", document={ "agg_metric": { "min": -302.5, "max": 702.3, "sum": 200, "value_count": 25 } }, ) print(resp) resp1 = client.index( index="stats-index", id="2", document={ "agg_metric": { "min": -93, "max": 1702.3, "sum": 300, "value_count": 25 } }, ) print(resp1) ---- 
python-elasticsearch-9.1.1/docs/examples/6a969ebe7490d93d35be895b14e5a42a.asciidoc000066400000000000000000000002751506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:309 [source, python] ---- resp = client.indices.get( index="logs-my_app-default", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6aa2941855d13f365f70aa8767ecb137.asciidoc000066400000000000000000000017621506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/multi-fields.asciidoc:10 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "city": { "type": "text", "fields": { "raw": { "type": "keyword" } } } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "city": "New York" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "city": "York" }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "match": { "city": "york" } }, sort={ "city.raw": "asc" }, aggs={ "Cities": { "terms": { "field": "city.raw" } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/6aca241c0361d26f134712821e2d09a9.asciidoc000066400000000000000000000003211506101734100262260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/clean-up-repo-api.asciidoc:85 [source, python] ---- resp = client.snapshot.cleanup_repository( name="my_repository", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6af9dc1c3240aa8e623ff3622bcb1b48.asciidoc000066400000000000000000000004051506101734100266200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/allocation_filtering.asciidoc:70 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.exclude._ip": "192.168.2.*" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b0288acb739c4667d41339e5100c327.asciidoc000066400000000000000000000004701506101734100261760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-query.asciidoc:234 [source, python] ---- resp = client.search( query={ "match": { "message": { "query": "this is a testt", "fuzziness": "AUTO" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b0d492c0f50103fefeab385a7bebd01.asciidoc000066400000000000000000000007641506101734100267010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/constant-keyword.asciidoc:11 [source, python] ---- resp = client.indices.create( index="logs-debug", mappings={ "properties": { "@timestamp": { "type": "date" }, "message": { "type": "text" }, "level": { "type": "constant_keyword", "value": "debug" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b104a66ab47fc1e1f24a5738f82feb4.asciidoc000066400000000000000000000005271506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/getting-started.asciidoc:288 [source, python] ---- resp = client.ccr.put_auto_follow_pattern( name="beats", remote_cluster="leader", leader_index_patterns=[ "metricbeat-*", "packetbeat-*" ], follow_index_pattern="{{leader_index}}-copy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b1336ff477f91d4a0db0b06db546ff0.asciidoc000066400000000000000000000002251506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/stop.asciidoc:51 
[source, python] ---- resp = client.watcher.stop() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b1e837a8469eca2d03d5c36f5910f34.asciidoc000066400000000000000000000012441506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/filter-aggregation.asciidoc:13 [source, python] ---- resp = client.search( index="sales", size="0", filter_path="aggregations", aggs={ "avg_price": { "avg": { "field": "price" } }, "t_shirts": { "filter": { "term": { "type": "t-shirt" } }, "aggs": { "avg_price": { "avg": { "field": "price" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b3dcde0656d3a96dbcfed1ec814e10a.asciidoc000066400000000000000000000003141506101734100270440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shutdown/apis/shutdown-delete.asciidoc:71 [source, python] ---- resp = client.shutdown.delete_node( node_id="USpTGYaBSIKbgSUJR2Z9lg", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b67c6121efb86ee100d40c2646f77b5.asciidoc000066400000000000000000000005561506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/slowlog.asciidoc:219 [source, python] ---- resp = client.indices.put_settings( index="*", settings={ "index.search.slowlog.include.user": True, "index.search.slowlog.threshold.fetch.warn": "30s", "index.search.slowlog.threshold.query.warn": "30s" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b6e275efe3d2aafe0fc3443f2c96868.asciidoc000066400000000000000000000006111506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:161 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "google-vertex-ai-embeddings", "pipeline": "google_vertex_ai_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b6f5e0ab4ef523fc9a3a4a655848f64.asciidoc000066400000000000000000000005721506101734100265740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/sparse-vector-query.asciidoc:44 [source, python] ---- resp = client.search( query={ "sparse_vector": { "field": "ml.tokens", "query_vector": { "token1": 0.5, "token2": 0.3, "token3": 0.2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b6fd0a5942dfb9762ad2790cf421a80.asciidoc000066400000000000000000000023561506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-client.asciidoc:363 [source, python] ---- resp = client.search_application.put( name="my-example-app", search_application={ "indices": [ "example-index" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n {{/query}}\n ],\n \"filter\": {{#toJson}}_es_filters{{/toJson}}\n }\n },\n \"_source\": {\n \"includes\": [\"title\", \"plot\"]\n },\n \"aggs\": {{#toJson}}_es_aggs{{/toJson}},\n \"from\": {{from}},\n \"size\": {{size}},\n \"sort\": {{#toJson}}_es_sort_fields{{/toJson}}\n }\n ", "params": { "query": "", "_es_filters": {}, "_es_aggs": {}, "_es_sort_fields": {}, "size": 10, "from": 0 }, "dictionary": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b77795e9249c8d9865f7a49fd86a863.asciidoc000066400000000000000000000004641506101734100263520ustar00rootroot00000000000000// This file is autogenerated, 
DO NOT EDIT // query-dsl/range-query.asciidoc:16 [source, python] ---- resp = client.search( query={ "range": { "age": { "gte": 10, "lte": 20, "boost": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6b8c5c8145c287c4fc535fa57ccf95a7.asciidoc000066400000000000000000000004071506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connector-sync-jobs-api.asciidoc:71 [source, python] ---- resp = client.perform_request( "GET", "/_connector/_sync_job", params={ "status": "pending" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6ba332596f5eb29660c90ab2d480e7dc.asciidoc000066400000000000000000000012041506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template-v1.asciidoc:189 [source, python] ---- resp = client.indices.put_template( name="template_1", index_patterns=[ "te*" ], order=0, settings={ "number_of_shards": 1 }, mappings={ "_source": { "enabled": False } }, ) print(resp) resp1 = client.indices.put_template( name="template_2", index_patterns=[ "tes*" ], order=1, settings={ "number_of_shards": 1 }, mappings={ "_source": { "enabled": True } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/6baf72c04d48cb04c2f8be609ff3b3b5.asciidoc000066400000000000000000000007161506101734100267160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/semantic-text.asciidoc:132 [source, python] ---- resp = client.search( index="test-index", query={ "match": { "my_semantic_field": "Which country is Paris in?" } }, highlight={ "fields": { "my_semantic_field": { "number_of_fragments": 2, "order": "score" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6bbc613bd4f9aec1bbdbabf5db021d28.asciidoc000066400000000000000000000012441506101734100271700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:232 [source, python] ---- resp = client.search( query={ "bool": { "should": [ { "match": { "title": "quick brown fox" } }, { "match": { "title.original": "quick brown fox" } }, { "match": { "title.shingles": "quick brown fox" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6bfa0a9a50c4e94276c7d63af1c31d9e.asciidoc000066400000000000000000000025371506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:25 [source, python] ---- resp = client.indices.create( index="place", mappings={ "properties": { "suggest": { "type": "completion", "contexts": [ { "name": "place_type", "type": "category" }, { "name": "location", "type": "geo", "precision": 4 } ] } } }, ) print(resp) resp1 = client.indices.create( index="place_path_category", mappings={ "properties": { "suggest": { "type": "completion", "contexts": [ { "name": "place_type", "type": "category", "path": "cat" }, { "name": "location", "type": "geo", "precision": 4, "path": "loc" } ] }, "loc": { "type": "geo_point" } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/6c00dae1a456ae5e854e98e895dca2ab.asciidoc000066400000000000000000000007611506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:137 [source, python] ---- resp = client.search( query={ "function_score": { "query": { "match": { "message": "elasticsearch" } }, "script_score": { "script": { "source": "Math.log(2 + doc['my-int'].value)" } } } }, ) 
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6c0acbff2df9003ccaf4350c9e2e186e.asciidoc000066400000000000000000000015561506101734100267750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-polygon-query.asciidoc:62 [source, python] ---- resp = client.search( query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_polygon": { "person.location": { "points": [ [ -70, 40 ], [ -80, 30 ], [ -90, 20 ] ] } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6c3f7c8601e8cc13d36eef98a5e2cb34.asciidoc000066400000000000000000000015151506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/nested-query.asciidoc:139 [source, python] ---- resp = client.indices.create( index="drivers", mappings={ "properties": { "driver": { "type": "nested", "properties": { "last_name": { "type": "text" }, "vehicle": { "type": "nested", "properties": { "make": { "type": "text" }, "model": { "type": "text" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6c70b022a8a74b887fe46e514feb38c0.asciidoc000066400000000000000000000002621506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/recovery.asciidoc:18 [source, python] ---- resp = client.indices.recovery( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6c72460570307f23478100db04a84c8e.asciidoc000066400000000000000000000003031506101734100261020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-component-template.asciidoc:92 [source, python] ---- resp = client.cluster.get_component_template( name="temp*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6c72f6791ba9223943f7556c5bfaa728.asciidoc000066400000000000000000000007101506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:58 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "user.id": "kimchy" } }, fields=[ "user.id", "http.response.*", { "field": "@timestamp", "format": "epoch_millis" } ], source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6c8bf6d4d68b7756f953be4c07655337.asciidoc000066400000000000000000000005651506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-reload-secure-settings.asciidoc:69 [source, python] ---- resp = client.nodes.reload_secure_settings( secure_settings_password="keystore-password", ) print(resp) resp1 = client.nodes.reload_secure_settings( node_id="nodeId1,nodeId2", secure_settings_password="keystore-password", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/6c927313867647e0ef3cd3a37cb410cc.asciidoc000066400000000000000000000003471506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-api-keys.asciidoc:185 [source, python] ---- resp = client.security.invalidate_api_key( username="myuser", realm_name="native1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6cb1dae368c945ecf7c9ec332a5743a2.asciidoc000066400000000000000000000015001506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/text.asciidoc:180 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { 
"text": { "type": "text", "fields": { "raw": { "type": "keyword" } } } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "text": [ "the quick brown fox", "the quick brown fox", "jumped over the lazy dog" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/6cd083045bf06e80b83889a939a18451.asciidoc000066400000000000000000000040521506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/nested.asciidoc:87 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "user": { "type": "nested" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "group": "fans", "user": [ { "first": "John", "last": "Smith" }, { "first": "Alice", "last": "White" } ] }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "nested": { "path": "user", "query": { "bool": { "must": [ { "match": { "user.first": "Alice" } }, { "match": { "user.last": "Smith" } } ] } } } }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "nested": { "path": "user", "query": { "bool": { "must": [ { "match": { "user.first": "Alice" } }, { "match": { "user.last": "White" } } ] } }, "inner_hits": { "highlight": { "fields": { "user.first": {} } } } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/6ce6cac9df216c52371c2e77e6e07ba1.asciidoc000066400000000000000000000031041506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-rules/apis/put-query-ruleset.asciidoc:123 [source, python] ---- resp = client.query_rules.put_ruleset( ruleset_id="my-ruleset", rules=[ { "rule_id": "my-rule1", "type": "pinned", "criteria": [ { "type": "contains", "metadata": "user_query", "values": [ "pugs", "puggles" ] }, { "type": "exact", "metadata": "user_country", "values": [ "us" ] } ], "actions": { "ids": [ "id1", "id2" ] } }, { "rule_id": "my-rule2", "type": "exclude", "criteria": [ { "type": "fuzzy", "metadata": "user_query", "values": [ "rescue dogs" ] } ], "actions": { "docs": [ { "_index": "index1", "_id": "id3" }, { "_index": "index2", "_id": "id4" } ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6ce8334def48552ba7d44025580d9105.asciidoc000066400000000000000000000003341506101734100262620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:242 [source, python] ---- resp = client.indices.create( index="", aliases={ "my-alias": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6cf3307c00f464c46475e352e067d714.asciidoc000066400000000000000000000013601506101734100261770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:103 [source, python] ---- resp = client.search( index="my_geoshapes", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_bounding_box": { "pin.location": { "top_left": { "lat": 40.73, "lon": -74.1 }, "bottom_right": { "lat": 40.01, "lon": -71.12 } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6d48f83c4a36d0544d876d3eff48dcef.asciidoc000066400000000000000000000002501506101734100266630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:262 [source, python] ---- resp = client.slm.execute_retention() print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/6d81c749ff9554044ee5f3ad92dcb89a.asciidoc000066400000000000000000000026761506101734100266210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-tsds.asciidoc:58 [source, python] ---- resp = client.ilm.put_lifecycle( name="my-weather-sensor-lifecycle-policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_age": "1d", "max_primary_shard_size": "50gb" } } }, "warm": { "min_age": "30d", "actions": { "shrink": { "number_of_shards": 1 }, "forcemerge": { "max_num_segments": 1 } } }, "cold": { "min_age": "60d", "actions": { "searchable_snapshot": { "snapshot_repository": "found-snapshots" } } }, "frozen": { "min_age": "90d", "actions": { "searchable_snapshot": { "snapshot_repository": "found-snapshots" } } }, "delete": { "min_age": "735d", "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6db118771354792646229e7a3c30c7e9.asciidoc000066400000000000000000000025011506101734100261320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:991 [source, python] ---- resp = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": {} }, { "timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a" }, { "index": {} }, { "timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b" }, { "index": {} }, { "timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a" }, { "index": {} }, { "timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b" }, { "index": {} }, { "timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c" }, { "index": {} }, { "timestamp": 1516297294000, "temperature": 202, "voltage": 4, "node": "c" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6dbfe5565a95508e65d304131847f9fc.asciidoc000066400000000000000000000005561506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/edgengram-tokenfilter.asciidoc:34 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "edge_ngram", "min_gram": 1, "max_gram": 2 } ], text="the quick brown fox jumps", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6dcd3916679f6aa64f79524c75991ebd.asciidoc000066400000000000000000000005641506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:248 [source, python] ---- resp = client.esql.query( query="\n FROM library\n | EVAL year = DATE_EXTRACT(\"year\", release_date)\n | WHERE page_count > 300 AND author == \"Frank Herbert\"\n | STATS count = COUNT(*) by year\n | WHERE count > 0\n | LIMIT 5\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6dd2a107bc64fd6f058fb17c21640649.asciidoc000066400000000000000000000003411506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:216 [source, python] ---- resp = client.security.invalidate_token( username="myuser", realm_name="saml1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6dd4c02fe3d6b800648a04d3e2d29fc1.asciidoc000066400000000000000000000003631506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/delete-snapshot-api.asciidoc:78 [source, python] ---- resp = client.snapshot.delete( repository="my_repository", 
    snapshot="snapshot_2,snapshot_3",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6ddd4e657efbf45def430a6419825796.asciidoc
// This file is autogenerated, DO NOT EDIT
// inference/service-azure-ai-studio.asciidoc:185

[source, python]
----
resp = client.inference.put(
    task_type="completion",
    inference_id="azure_ai_studio_completion",
    inference_config={
        "service": "azureaistudio",
        "service_settings": {
            "api_key": "<api_key>",
            "target": "<target_uri>",
            "provider": "<model_provider>",
            "endpoint_type": "<endpoint_type>"
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e000496a1fa8b57148518eaad692f35.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/match-all-query.asciidoc:39

[source, python]
----
resp = client.search(
    query={
        "match_none": {}
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e0b675eff7ed73c09a76a415930a486.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/parent-id-query.asciidoc:24

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "my-join-field": {
                "type": "join",
                "relations": {
                    "my-parent": "my-child"
                }
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e1157f3184fa192d47a3d0e3ea17a6c.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/unique-tokenfilter.asciidoc:130

[source, python]
----
resp = client.indices.create(
    index="letter_unique_pos_example",
    settings={
        "analysis": {
            "analyzer": {
                "letter_unique_pos": {
                    "tokenizer": "letter",
                    "filter": [
                        "unique_pos"
                    ]
                }
            },
            "filter": {
                "unique_pos": {
                    "type": "unique",
                    "only_on_same_position": True
                }
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e1ae8d6103e0b77f14fb0ea1bfb7ffa.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/shape.asciidoc:397

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": "GEOMETRYCOLLECTION (POINT (1000.0 100.0), LINESTRING (1001.0 100.0, 1002.0 100.0))"
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e498b9dc753b94abf2618c407fa5cd8.asciidoc
// This file is autogenerated, DO NOT EDIT
// migration/migrate_9_0.asciidoc:453

[source, python]
----
resp = client.reindex(
    wait_for_completion=False,
    source={
        "index": ".ml-anomalies-custom-example"
    },
    dest={
        "index": ".reindexed-v9-ml-anomalies-custom-example"
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e6b78e6b689a5d6aa637271b6d084e2.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/retriever.asciidoc:363

[source, python]
----
resp = client.search(
    index="movies",
    retriever={
        "rrf": {
            "retrievers": [
                {
                    "standard": {
                        "query": {
                            "sparse_vector": {
                                "field": "plot_embedding",
                                "inference_id": "my-elser-model",
                                "query": "films that explore psychological depths"
                            }
                        }
                    }
                },
                {
                    "standard": {
                        "query": {
                            "multi_match": {
                                "query": "crime",
                                "fields": [
                                    "plot",
                                    "title"
                                ]
                            }
                        }
                    }
                },
                {
                    "knn": {
                        "field": "vector",
                        "query_vector": [
                            10,
                            22,
                            77
                        ],
                        "k": 10,
                        "num_candidates": 10
                    }
                }
            ]
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6e86225ed4a6e3be8078b83ef301f731.asciidoc000066400000000000000000000005501506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:66 [source, python] ---- resp = client.search( index="my-index-000001", query={ "percolate": { "field": "query", "document": { "message": "A new bonsai tree in the office" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6ea062455229151e311869a81ee40252.asciidoc000066400000000000000000000010111506101734100260170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-multiple-indices.asciidoc:83 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, ) print(resp) resp1 = client.search( index="_all", query={ "match": { "user.id": "kimchy" } }, ) print(resp1) resp2 = client.search( index="*", query={ "match": { "user.id": "kimchy" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/6edfc35a66afd9b884431fccf48fdbf5.asciidoc000066400000000000000000000007161506101734100271060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-with-synonyms.asciidoc:114 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "lowercase", { "type": "synonym_graph", "synonyms": [ "pc => personal computer", "computer, pc, laptop" ] } ], text="Check how PC synonyms work", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6eead05dd3b04722ef0ea5644c2e047d.asciidoc000066400000000000000000000025541506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/bucket-script-aggregation.asciidoc:50 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "total_sales": { "sum": { "field": "price" } }, "t-shirts": { "filter": { "term": { "type": "t-shirt" } }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "t-shirt-percentage": { "bucket_script": { "buckets_path": { "tShirtSales": "t-shirts>sales", "totalSales": "total_sales" }, "script": "params.tShirtSales / params.totalSales * 100" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f0389ac52808df23bb6081a1acd4eed.asciidoc000066400000000000000000000003201506101734100266220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/built-in-users.asciidoc:158 [source, python] ---- resp = client.security.enable_user( username="logstash_system", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f07152055e99416deb10e95b428b847.asciidoc000066400000000000000000000012731506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/edgengram-tokenfilter.asciidoc:199 [source, python] ---- resp = client.indices.create( index="edge_ngram_custom_example", settings={ "analysis": { "analyzer": { "default": { "tokenizer": "whitespace", "filter": [ "3_5_edgegrams" ] } }, "filter": { "3_5_edgegrams": { "type": "edge_ngram", "min_gram": 3, "max_gram": 5 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f34e27481460a95e59ffbacb76bd637.asciidoc000066400000000000000000000025471506101734100265270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/custom-analyzer.asciidoc:159 [source, python] 
---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_custom_analyzer": { "char_filter": [ "emoticons" ], "tokenizer": "punctuation", "filter": [ "lowercase", "english_stop" ] } }, "tokenizer": { "punctuation": { "type": "pattern", "pattern": "[ .,!?]" } }, "char_filter": { "emoticons": { "type": "mapping", "mappings": [ ":) => _happy_", ":( => _sad_" ] } }, "filter": { "english_stop": { "type": "stop", "stopwords": "_english_" } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_custom_analyzer", text="I'm a :) person, and you?", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/6f3b723bf6179b96c3413597ed7f49e1.asciidoc000066400000000000000000000004111506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-update-api-keys.asciidoc:302 [source, python] ---- resp = client.security.bulk_update_api_keys( ids=[ "VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f48ab7cbb8a4a46d0e9272c07166eaf.asciidoc000066400000000000000000000003531506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/apis/sql-translate-api.asciidoc:18 [source, python] ---- resp = client.sql.translate( query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=10, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f4cbebfd6d2cee54aa3e7a86a755ef8.asciidoc000066400000000000000000000015601506101734100271550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/knn-query.asciidoc:210 [source, python] ---- resp = client.search( index="my-image-index", size=3, query={ "bool": { "should": [ { "match": { "title": { "query": "mountain lake", "boost": 1 } } }, { "knn": { "field": "image-vector", "query_vector": [ -5, 9, -12 ], "k": 10, "boost": 2 } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f5adbd55a3a2760e7fe9d32df18b1a1.asciidoc000066400000000000000000000005271506101734100267150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/getting-started.asciidoc:114 [source, python] ---- resp = client.index( index="logs", document={ "timestamp": "2015-05-17T18:12:07.613Z", "request": "GET index.html", "status_code": 404, "message": "Error: File not found" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f6d5a4a90e1265822628d4ced963639.asciidoc000066400000000000000000000005131506101734100262760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/field-mapping.asciidoc:63 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "create_date": "2015/09/02" }, ) print(resp) resp1 = client.indices.get_mapping( index="my-index-000001", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/6f842819c50e8490080dd085e0c6aca3.asciidoc000066400000000000000000000004531506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/normalizer.asciidoc:125 [source, python] ---- resp = client.search( index="index", size=0, aggs={ "foo_terms": { "terms": { "field": "foo" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6f855bc92b4cc6e6a63f95bce1cb4441.asciidoc000066400000000000000000000002761506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/logstash/get-pipeline.asciidoc:75 [source, python] ---- 
resp = client.logstash.get_pipeline(
    id="my_pipeline",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6f8a682c908b826ca90cadd9d2f582b4.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/retrieve-selected-fields.asciidoc:670

[source, python]
----
resp = client.search(
    stored_fields=[
        "user",
        "postDate"
    ],
    query={
        "term": {
            "user": "kimchy"
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6f8bdca97e43aac75e32de655aa4314a.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/docs/connectors-API-tutorial.asciidoc:450

[source, python]
----
resp = client.connector.delete(
    connector_id="my-connector-id",
    delete_sync_jobs=True,
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6fa02c2ad485bbe91f44b321158250f3.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/search-as-you-type.asciidoc:87

[source, python]
----
resp = client.search(
    index="my-index-000001",
    query={
        "multi_match": {
            "query": "brown f",
            "type": "bool_prefix",
            "fields": [
                "my_field",
                "my_field._2gram",
                "my_field._3gram"
            ]
        }
    },
    highlight={
        "fields": {
            "my_field": {
                "matched_fields": [
                    "my_field._index_prefix"
                ]
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6fa570ae7039171e2ab722344ec1063f.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/get-field-mapping.asciidoc:20

[source, python]
----
resp = client.indices.get_field_mapping(
    index="my-index-000001",
    fields="user",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6fbb88f399618e1b47412082062ce2bd.asciidoc
// This file is autogenerated, DO NOT EDIT
// transform/examples.asciidoc:537

[source, python]
----
resp = client.transform.preview_transform(
    source={
        "index": "kibana_sample_data_logs"
    },
    pivot={
        "group_by": {
            "timestamp": {
                "date_histogram": {
                    "field": "timestamp",
                    "fixed_interval": "1h"
                }
            }
        },
        "aggregations": {
            "bytes.max": {
                "max": {
                    "field": "bytes"
                }
            },
            "top": {
                "top_metrics": {
                    "metrics": [
                        {
                            "field": "clientip"
                        },
                        {
                            "field": "geo.src"
                        }
                    ],
                    "sort": {
                        "bytes": "desc"
                    }
                }
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6fbbf40cab0187f544ff7bca31d18d57.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/geo-shape.asciidoc:253

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": {
            "type": "Polygon",
            "coordinates": [
                [
                    [
                        100,
                        0
                    ],
                    [
                        101,
                        0
                    ],
                    [
                        101,
                        1
                    ],
                    [
                        100,
                        1
                    ],
                    [
                        100,
                        0
                    ]
                ],
                [
                    [
                        100.2,
                        0.2
                    ],
                    [
                        100.8,
                        0.2
                    ],
                    [
                        100.8,
                        0.8
                    ],
                    [
                        100.2,
                        0.8
                    ],
                    [
                        100.2,
                        0.2
                    ]
                ]
            ]
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6fc778e9a888b16b937c5c2a7a1ec140.asciidoc
// This file is autogenerated, DO NOT EDIT
// searchable-snapshots/apis/clear-cache.asciidoc:75

[source, python]
----
resp = client.searchable_snapshots.clear_cache(
    index="my-index",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/6fd82baa17a48e09e3d2eed514af7f46.asciidoc
// This
file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:359 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "MultiLineString", "coordinates": [ [ [ 102, 2 ], [ 103, 2 ], [ 103, 3 ], [ 102, 3 ] ], [ [ 100, 0 ], [ 101, 0 ], [ 101, 1 ], [ 100, 1 ] ], [ [ 100.2, 0.2 ], [ 100.8, 0.2 ], [ 100.8, 0.8 ], [ 100.2, 0.8 ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6fe6c095c6995e0f2214f5f3bc85d74e.asciidoc000066400000000000000000000003311506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/apis/delete-lifecycle.asciidoc:83 [source, python] ---- resp = client.indices.delete_data_lifecycle( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/6febf0e6883b23b15ac213abc4bac326.asciidoc000066400000000000000000000010511506101734100266660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:282 [source, python] ---- resp = client.search( index="place", suggest={ "place_suggestion": { "prefix": "tim", "completion": { "field": "suggest", "size": 10, "contexts": { "location": { "lat": 43.662, "lon": -79.38 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7011fcdd231804f9c3894154ae2c3fbc.asciidoc000066400000000000000000000004771506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/sparse-vector.asciidoc:14 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "text.tokens": { "type": "sparse_vector" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/701f1fffc65e9e51c96aa60261e2eae3.asciidoc000066400000000000000000000003271506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/update-cross-cluster-api-key.asciidoc:126 [source, python] ---- resp = client.security.get_api_key( id="VuaCfGcBCdbkQm-e5aOx", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7021ddb273a3a00847324d2f670c4c04.asciidoc000066400000000000000000000015731506101734100262340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:548 [source, python] ---- resp = client.search( index="image-index", query={ "match": { "title": { "query": "mountain lake", "boost": 0.9 } } }, knn=[ { "field": "image-vector", "query_vector": [ 54, 10, -2 ], "k": 5, "num_candidates": 50, "boost": 0.1 }, { "field": "title-vector", "query_vector": [ 1, 20, -52, 23, 10 ], "k": 10, "num_candidates": 10, "boost": 0.5 } ], size=10, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7067a498bb6c788854a26443a64b843a.asciidoc000066400000000000000000000013301506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/script-query.asciidoc:87 [source, python] ---- resp = client.search( runtime_mappings={ "amount.signed": { "type": "double", "script": "\n double amount = doc['amount'].value;\n if (doc['type'].value == 'expense') {\n amount *= -1;\n }\n emit(amount);\n " } }, query={ "bool": { "filter": { "range": { "amount.signed": { "lt": 10 } } } } }, fields=[ { "field": "amount.signed" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/708e7ec681be41791f232817a07cda82.asciidoc000066400000000000000000000004211506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
snapshot-restore/apis/get-snapshot-api.asciidoc:538 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="snapshot*", size="2", sort="name", offset="2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/70bbe14bc4d5a5d58e81ab2b02408817.asciidoc000066400000000000000000000005431506101734100264610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/configuring-pki-realm.asciidoc:159 [source, python] ---- resp = client.security.put_role_mapping( name="users", roles=[ "user" ], rules={ "field": { "dn": "cn=John Doe,ou=example,o=com" } }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/70c736ecb3746dbe839af0e468712805.asciidoc000066400000000000000000000003261506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/delete-transform.asciidoc:59 [source, python] ---- resp = client.transform.delete_transform( transform_id="ecommerce_transform", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/70cc66bf4054ebf0ad4955cb99d9ab80.asciidoc000066400000000000000000000004651506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/update-trained-model-deployment.asciidoc:80 [source, python] ---- resp = client.ml.update_trained_model_deployment( model_id="elastic__distilbert-base-uncased-finetuned-conll03-english", number_of_allocations=4, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/70f89dd6b71ea890ad3cf47d83e43344.asciidoc000066400000000000000000000013371506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:66 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", description="My optional pipeline description", processors=[ { "set": { "description": "My optional processor description", "field": "my-long-field", "value": 10 } }, { "set": { "description": "Set 'my-boolean-field' to true", "field": "my-boolean-field", "value": True } }, { "lowercase": { "field": "my-keyword-field" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7106e6317e6368b9863cf64df9c6f0c9.asciidoc000066400000000000000000000012031506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/put-transform.asciidoc:384 [source, python] ---- resp = client.transform.put_transform( transform_id="ecommerce_transform2", source={ "index": "kibana_sample_data_ecommerce" }, latest={ "unique_key": [ "customer_id" ], "sort": "order_date" }, description="Latest order for each customer", dest={ "index": "kibana_sample_data_ecommerce_transform2" }, frequency="5m", sync={ "time": { "field": "order_date", "delay": "60s" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/711443504b69d0d296e717c716a223e2.asciidoc000066400000000000000000000017721506101734100261200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/nested-query.asciidoc:212 [source, python] ---- resp = client.search( index="drivers", query={ "nested": { "path": "driver", "query": { "nested": { "path": "driver.vehicle", "query": { "bool": { "must": [ { "match": { "driver.vehicle.make": "Powell Motors" } }, { "match": { "driver.vehicle.model": "Canyonero" } } ] } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7148c8512079d378af70302e65502dd2.asciidoc000066400000000000000000000004151506101734100261170ustar00rootroot00000000000000// This file 
is autogenerated, DO NOT EDIT // ilm/ilm-tutorial.asciidoc:378 [source, python] ---- resp = client.indices.create( index="timeseries-000001", aliases={ "timeseries": { "is_write_index": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7163346755400594d1dd7e445aa19ff0.asciidoc000066400000000000000000000002651506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:426 [source, python] ---- resp = client.search( index="music", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/719141517d83b7e8e929b347a8d67c9f.asciidoc000066400000000000000000000004651506101734100263210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:337 [source, python] ---- resp = client.indices.get( index="kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", features="settings", flat_settings=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/71998bb300ac2a58419b0772cdc1c586.asciidoc000066400000000000000000000013311506101734100263320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/version.asciidoc:85 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "versions": { "type": "version" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "versions": [ "8.0.0-beta1", "8.5.0", "0.90.12", "2.6.1", "1.3.4", "1.3.4" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/71c629c44bf3c542a0daacbfc253c4b0.asciidoc000066400000000000000000000002711506101734100266660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/stats.asciidoc:1907 [source, python] ---- resp = client.cluster.stats( node_id="node1,node*,master:false", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/71de08a2d962c66f0c60677eff23f8d1.asciidoc000066400000000000000000000014171506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/range-aggregation.asciidoc:123 [source, python] ---- resp = client.search( index="sales", aggs={ "price_ranges": { "range": { "field": "price", "keyed": True, "ranges": [ { "key": "cheap", "to": 100 }, { "key": "average", "from": 100, "to": 200 }, { "key": "expensive", "from": 200 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/71e47a83f632ef159956287bbfe4ca12.asciidoc000066400000000000000000000012421506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/shape-query.asciidoc:54 [source, python] ---- resp = client.search( index="example", query={ "shape": { "geometry": { "shape": { "type": "envelope", "coordinates": [ [ 1355, 5355 ], [ 1400, 5200 ] ] }, "relation": "within" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/71fa652ddea811eb3c8bf8c5db21e549.asciidoc000066400000000000000000000003461506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:230 [source, python] ---- resp = client.indices.analyze( index="analyze_sample", analyzer="whitespace", text="this is a test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/722238b4e7b78cdb3c6a986780e7e286.asciidoc000066400000000000000000000011141506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // aggregations/bucket/range-field-note.asciidoc:105 [source, python] ---- resp = client.search( index="range_index", size="0", query={ "range": { "time_frame": { "gte": "2019-11-01", "format": "yyyy-MM-dd" } } }, aggs={ "november_data": { "date_histogram": { "field": "time_frame", "calendar_interval": "day", "format": "yyyy-MM-dd" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/726994d8f3793b86628255a797155a52.asciidoc000066400000000000000000000003231506101734100260200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors.asciidoc:19 [source, python] ---- resp = client.nodes.info( node_id="ingest", filter_path="nodes.*.ingest.processors", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/72a3668ddc95d9aec47cc679d1e7afc5.asciidoc000066400000000000000000000014611506101734100267470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:79 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "cluster_one": { "seeds": [ "35.238.149.1:9300" ], "skip_unavailable": True }, "cluster_two": { "seeds": [ "35.238.149.2:9300" ], "skip_unavailable": False }, "cluster_three": { "seeds": [ "35.238.149.3:9300" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/72ae3851160fcf02b8e2cdfd4e57d238.asciidoc000066400000000000000000000002471506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/start-ilm.asciidoc:66 [source, python] ---- resp = client.ilm.start() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/72b999120785dfba2827268482e9be0a.asciidoc000066400000000000000000000036271506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geobounds-aggregation.asciidoc:12 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": 1 } }, { "location": "POINT (4.912350 52.374081)", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (4.901618 52.369219)", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 } }, { "location": "POINT (4.914722 52.371667)", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (4.405200 51.222900)", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (2.336389 48.861111)", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (2.327000 48.860000)", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( index="museums", size="0", query={ "match": { "name": "musée" } }, aggs={ "viewport": { "geo_bounds": { "field": "location", "wrap_longitude": True } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/72bae0252b74ff6fd9f0702ff008d84a.asciidoc000066400000000000000000000004151506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:670 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="*", sort="name", from_sort_value="snapshot_2", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/72beebe779a258c225dee7b023e60c52.asciidoc000066400000000000000000000003121506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/point-in-time-api.asciidoc:152 [source, python] ---- resp = client.nodes.stats( metric="indices", index_metric="search", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/730045fae3743c39b612813a42c330c3.asciidoc000066400000000000000000000007571506101734100261630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/index-prefixes.asciidoc:64 [source, python] ---- resp = client.search( index="my-index-000001", query={ "prefix": { "full_name": { "value": "ki" } } }, highlight={ "fields": { "full_name": { "matched_fields": [ "full_name._index_prefix" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73250f845738c428246a3ade66a8f54c.asciidoc000066400000000000000000000020741506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/weighted-avg-aggregation.asciidoc:152 [source, python] ---- resp = client.index( index="exams", refresh=True, document={ "grade": 100, "weight": [ 2, 3 ] }, ) print(resp) resp1 = client.index( index="exams", refresh=True, document={ "grade": 80, "weight": 3 }, ) print(resp1) resp2 = client.search( index="exams", filter_path="aggregations", size=0, runtime_mappings={ "weight.combined": { "type": "double", "script": "\n double s = 0;\n for (double w : doc['weight']) {\n s += w;\n }\n emit(s);\n " } }, aggs={ "weighted_grade": { "weighted_avg": { "value": { "script": "doc.grade.value + 1" }, "weight": { "field": "weight.combined" } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/734e2b1d1ca84a305240a449738f0eba.asciidoc000066400000000000000000000004451506101734100263770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:467 [source, python] ---- resp = client.cat.indices( v=True, index=".ds-my-data-stream-2022.06.17-000001,kibana_sample_data_flights", h="index,status,health", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73646c12ad33a813ab2280f1dc83500e.asciidoc000066400000000000000000000004411506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/put-follow.asciidoc:30 [source, python] ---- resp = client.ccr.follow( index="", wait_for_active_shards="1", remote_cluster="", leader_index="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/738db420e3ad2a127ea75fb8e5051926.asciidoc000066400000000000000000000002621506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:455 [source, python] ---- resp = client.search( index="last-log-from-clientip", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73b07b24ab2c4cd304a57f9cbda8b863.asciidoc000066400000000000000000000003221506101734100266210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // behavioral-analytics/apis/list-analytics-collection.asciidoc:66 [source, python] ---- resp = client.search_application.get_behavioral_analytics() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73be1f93d789264e5b972ddb5991bc66.asciidoc000066400000000000000000000003541506101734100264550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/logging-config.asciidoc:180 [source, python] ---- resp =
client.cluster.put_settings( persistent={ "logger.org.elasticsearch.discovery": "DEBUG" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73d1a6c5ef90b7e35d43a0bfdc1e158d.asciidoc000066400000000000000000000003111506101734100267040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-index-caps.asciidoc:95 [source, python] ---- resp = client.rollup.get_rollup_index_caps( index="sensor_rollup", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73df03be6ee78b10106581dbd7cb39ef.asciidoc000066400000000000000000000014321506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:489 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movavg": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.ewma(values, 0.3)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73ebc89cb32adb389ae16bb088d7c7e6.asciidoc000066400000000000000000000004131506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:242 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.enable": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73f9271dee9b8539b6aa7e17f323c623.asciidoc000066400000000000000000000014241506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/multi-terms-aggregation.asciidoc:342 [source, python] ---- resp = client.search( index="products", aggs={ "genres_and_products": { "multi_terms": { "terms": [ { "field": "genre" }, { "field": "product" } ], "order": { "total_quantity": "desc" } }, "aggs": { "total_quantity": { "sum": { "field": "quantity" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/73fa0d6d03cd98ea538fff9e89d99eed.asciidoc000066400000000000000000000003551506101734100270460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-service-accounts.asciidoc:63 [source, python] ---- resp = client.security.get_service_accounts( namespace="elastic", service="fleet-server", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7404c6e809fee5d7eb9678a82a872806.asciidoc000066400000000000000000000007441506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations.asciidoc:180 [source, python] ---- resp = client.search( index="my-index-000001", aggs={ "my-agg-name": { "terms": { "field": "my-field" }, "aggs": { "my-sub-agg-name": { "avg": { "field": "my-other-field" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/741180473ba526219578ad0422f4fe81.asciidoc000066400000000000000000000013031506101734100261110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-features-api.asciidoc:97 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/my-connector/_features", headers={"Content-Type": "application/json"}, body={ "features": { "document_level_security": { "enabled": True }, "incremental_sync": { "enabled": True }, "sync_rules": { "advanced": { "enabled": False }, "basic": { "enabled": True } } } }, ) print(resp) ---- 
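The `_features` update above is issued through `client.perform_request`, the raw-request escape hatch used here instead of a dedicated helper. To confirm what was stored, the connector can be read back; a minimal sketch, assuming the same `my-connector` ID:

[source, python]
----
# Fetch the connector document and inspect the features set by the PUT above.
resp = client.connector.get(
    connector_id="my-connector",
)
print(resp)
----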
python-elasticsearch-9.1.1/docs/examples/7429b16221fe741fd31b0584786dd0b0.asciidoc000066400000000000000000000005731506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/post-inference.asciidoc:249 [source, python] ---- resp = client.inference.inference( task_type="text_embedding", inference_id="my-cohere-endpoint", input="The sky above the port was the color of television tuned to a dead channel.", task_settings={ "input_type": "ingest" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/744aeb2af40f519e430e21e004e3c3b7.asciidoc000066400000000000000000000014251506101734100264550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/multivalued-fields.asciidoc:99 [source, python] ---- resp = client.indices.create( index="mv", mappings={ "properties": { "b": { "type": "long" } } }, ) print(resp) resp1 = client.bulk( index="mv", refresh=True, operations=[ { "index": {} }, { "a": 1, "b": [ 2, 2, 1 ] }, { "index": {} }, { "a": 2, "b": [ 1, 1 ] } ], ) print(resp1) resp2 = client.esql.query( query="FROM mv | LIMIT 2", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/7456ef459d510d66ba4492cc9fbdc6c6.asciidoc000066400000000000000000000007431506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/remote-clusters-connect.asciidoc:194 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "cluster_two": { "mode": None, "seeds": None, "skip_unavailable": None, "transport.compress": None } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/745864ef2427188241a4702b94ea57be.asciidoc000066400000000000000000000012731506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/histogram-aggregation.asciidoc:164 [source, python] ---- resp = client.search( index="sales", size="0", query={ "constant_score": { "filter": { "range": { "price": { "lte": "500" } } } } }, aggs={ "prices": { "histogram": { "field": "price", "interval": 50, "extended_bounds": { "min": 0, "max": 500 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/74678f8bbc7e4fc1885719d1cf63ac67.asciidoc000066400000000000000000000014121506101734100265310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/daterange-aggregation.asciidoc:354 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "range": { "date_range": { "field": "date", "format": "MM-yyy", "ranges": [ { "from": "01-2015", "to": "03-2015", "key": "quarter_01" }, { "from": "03-2015", "to": "06-2015", "key": "quarter_02" } ], "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/746e0a1cb5984f2672963b363505c7b3.asciidoc000066400000000000000000000012451506101734100262070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/date.asciidoc:188 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "date": { "type": "date", "format": "strict_date_optional_time||epoch_second" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="example", refresh=True, document={ "date": 1618321898 }, ) print(resp1) resp2 = client.search( index="my-index-000001", fields=[ { "field": "date" } ], source=False, ) print(resp2) ---- 
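Because the `date` mapping above lists `strict_date_optional_time||epoch_second`, formats are tried left to right at index time, so the field accepts both ISO-8601 strings and epoch-seconds numbers. A minimal sketch of the string variant (the `example-iso` document ID is illustrative; `2021-04-13T13:51:38.000Z` is the same instant as the `1618321898` epoch value indexed above):

[source, python]
----
# strict_date_optional_time parses the ISO-8601 string; epoch_second
# was the fallback that parsed the numeric 1618321898 earlier.
resp = client.index(
    index="my-index-000001",
    id="example-iso",
    refresh=True,
    document={
        "date": "2021-04-13T13:51:38.000Z"
    },
)
print(resp)
----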
python-elasticsearch-9.1.1/docs/examples/746e87db7e1e8b5e6b40d8b5b188de42.asciidoc000066400000000000000000000004761506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/stats-aggregation.asciidoc:14 [source, python] ---- resp = client.search( index="exams", size="0", aggs={ "grades_stats": { "stats": { "field": "grade" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7471e97aaaf21c3a200abdd89f15c3cc.asciidoc000066400000000000000000000011401506101734100266730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/intervals-query.asciidoc:393 [source, python] ---- resp = client.search( query={ "intervals": { "my_text": { "match": { "query": "hot porridge", "max_gaps": 10, "filter": { "not_containing": { "match": { "query": "salty" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7478ff69113fb53f41ea07cdf911fa67.asciidoc000066400000000000000000000015351506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:1343 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "daily_sales": { "date_histogram": { "field": "order_date", "calendar_interval": "day" }, "aggs": { "daily_revenue": { "sum": { "field": "taxful_total_price" } }, "smoothed_revenue": { "moving_fn": { "buckets_path": "daily_revenue", "window": 3, "script": "MovingFunctions.unweightedAvg(values)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/747a4b5001423938d7d05399d28f1995.asciidoc000066400000000000000000000003561506101734100260630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-with-existing-indices.asciidoc:74 [source, python] ---- resp = client.cluster.put_settings( persistent={ "indices.lifecycle.poll_interval": "1m" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/74a80c28737a0648db0dfe7f049d12f2.asciidoc000066400000000000000000000002551506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:278 [source, python] ---- resp = client.exists( index="my-index-000001", id="0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/74b13ceb6cda3acaa9e9f58c9e5e2431.asciidoc000066400000000000000000000005041506101734100267760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/meta-field.asciidoc:31 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", meta={ "class": "MyApp2::User3", "version": { "min": "1.3", "max": "1.5" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/74da377bccad43da2b0e276c086d26ba.asciidoc000066400000000000000000000007731506101734100267070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/cluster-info.asciidoc:388 [source, python] ---- resp = client.cluster.info( target="_all", ) print(resp) resp1 = client.cluster.info( target="http", ) print(resp1) resp2 = client.cluster.info( target="ingest", ) print(resp2) resp3 = client.cluster.info( target="thread_pool", ) print(resp3) resp4 = client.cluster.info( target="script", ) print(resp4) resp5 = client.cluster.info( target="http,ingest", ) print(resp5) ---- python-elasticsearch-9.1.1/docs/examples/750ac969f9a05567f5cdf4f93d6244b6.asciidoc000066400000000000000000000006531506101734100264530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:281 [source, python] ---- resp = client.cluster.reroute( commands=[ { "allocate_empty_primary": { "index": "my-index", "shard": 0, "node": "my-node", "accept_data_loss": "true" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7594a9a85c8511701e281974cbc253e1.asciidoc000066400000000000000000000010511506101734100262020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:236 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="amazon_bedrock_embeddings", inference_config={ "service": "amazonbedrock", "service_settings": { "access_key": "", "secret_key": "", "region": "", "provider": "", "model": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/75957a7d1b67e3d47899c5f18b32cb61.asciidoc000066400000000000000000000003041506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/close-job.asciidoc:105 [source, python] ---- resp = client.ml.close_job( job_id="low_request_rate", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/75aba7b1d3a22dce62f26b8b1e6bee58.asciidoc000066400000000000000000000005051506101734100267670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/validate.asciidoc:173 [source, python] ---- resp = client.indices.validate_query( index="my-index-000001", explain=True, query={ "query_string": { "query": "@timestamp:foo", "lenient": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/75c347b181112d2c4538c01ade903afe.asciidoc000066400000000000000000000006011506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:257 [source, python] ---- resp = client.indices.validate_query( index="my-index-000001", rewrite=True, query={ "match": { "user.id": { "query": "kimchy", "fuzziness": "auto" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/75e13a00f0909c955031ff62acc14a79.asciidoc000066400000000000000000000007231506101734100263310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:12 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": "GET /search" } }, collapse={ "field": "user.id" }, sort=[ { "http.response.bytes": { "order": "desc" } } ], from_=0, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/75e360d03fb416f0a65ca37c662c2e9c.asciidoc000066400000000000000000000015421506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/scripted-metric-aggregation.asciidoc:159 [source, python] ---- resp = client.bulk( index="transactions", refresh=True, operations=[ { "index": { "_id": 1 } }, { "type": "sale", "amount": 80 }, { "index": { "_id": 2 } }, { "type": "cost", "amount": 10 }, { "index": { "_id": 3 } }, { "type": "cost", "amount": 30 }, { "index": { "_id": 4 } }, { "type": "sale", "amount": 130 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/75e6d66e94e61bd8a555beaaee255c36.asciidoc000066400000000000000000000005251506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-search.asciidoc:178 [source, python] ---- resp = client.rollup.rollup_search( index="sensor_rollup", size=0, aggregations={ 
"avg_temperature": { "avg": { "field": "temperature" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/763ce1377c8dfa1ca6a042d8ee99f4f5.asciidoc000066400000000000000000000002601506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/tsds-reindex.asciidoc:284 [source, python] ---- resp = client.indices.rollover( alias="k9s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/76448aaaaa2c352bb6e09d2f83a3fbb3.asciidoc000066400000000000000000000004051506101734100266760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/letter-tokenizer.asciidoc:16 [source, python] ---- resp = client.indices.analyze( tokenizer="letter", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7659f2f2b0fbe8584b855a01638b95ed.asciidoc000066400000000000000000000010441506101734100264400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:777 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "user_name": { "terms": { "field": "user_name" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/765c9c8b40b67a42121648045dbf10fb.asciidoc000066400000000000000000000003141506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/jvm-memory-pressure.asciidoc:11 [source, python] ---- resp = client.nodes.stats( filter_path="nodes.*.jvm.mem.pools.old", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/766cfc1c9fcd2c186e965761ceb2c07d.asciidoc000066400000000000000000000004651506101734100266610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/increase-tier-capacity.asciidoc:300 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "number_of_replicas": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/769f75829a8e6670aa4cf83d0d737046.asciidoc000066400000000000000000000015231506101734100263100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/autodatehistogram-aggregation.asciidoc:124 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "date": "2015-10-01T00:30:00Z" }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "date": "2015-10-01T01:30:00Z" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="3", refresh=True, document={ "date": "2015-10-01T02:30:00Z" }, ) print(resp2) resp3 = client.search( index="my-index-000001", size="0", aggs={ "by_day": { "auto_date_histogram": { "field": "date", "buckets": 3 } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/76b279835936ee4b546a171c671c3cd7.asciidoc000066400000000000000000000004341506101734100263000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/cjk-width-tokenfilter.asciidoc:28 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "cjk_width" ], text="シーサイドライナー", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/76bc87c2592864152768687c2963d1d1.asciidoc000066400000000000000000000012561506101734100260750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
rest-api/security/update-api-key.asciidoc:154 [source, python] ---- resp = client.security.update_api_key( id="VuaCfGcBCdbkQm-e5aOx", role_descriptors={ "role-a": { "indices": [ { "names": [ "*" ], "privileges": [ "write" ] } ] } }, metadata={ "environment": { "level": 2, "trusted": True, "tags": [ "production" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/76c167d8ab305cb43b594f140c902dfe.asciidoc000066400000000000000000000006241506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shrink-index.asciidoc:168 [source, python] ---- resp = client.indices.shrink( index="my_source_index", target="my_target_index", settings={ "index.number_of_replicas": 1, "index.number_of_shards": 1, "index.codec": "best_compression" }, aliases={ "my_search_indices": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/76c73b54f3f1e5cb1c0fcccd7c3fd18e.asciidoc000066400000000000000000000036311506101734100270610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/ingest-vectors.asciidoc:86 [source, python] ---- resp = client.bulk( operations=[ { "index": { "_index": "amazon-reviews", "_id": "2" } }, { "review_text": "This product is amazing! I love it.", "review_vector": [ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8 ] }, { "index": { "_index": "amazon-reviews", "_id": "3" } }, { "review_text": "This product is terrible. I hate it.", "review_vector": [ 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1 ] }, { "index": { "_index": "amazon-reviews", "_id": "4" } }, { "review_text": "This product is great. I can do anything with it.", "review_vector": [ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8 ] }, { "index": { "_index": "amazon-reviews", "_id": "5" } }, { "review_text": "This product has ruined my life and the lives of my family and friends.", "review_vector": [ 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1 ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/76dbdd0b2bd48c3c6b1a8d81e23bafd6.asciidoc000066400000000000000000000003101506101734100270360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:149 [source, python] ---- resp = client.indices.analyze( analyzer="standard", text="this is a test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/76e02434835630cb830724beb92df354.asciidoc000066400000000000000000000026361506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:1433 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "rrf": { "retrievers": [ { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } }, { "text_similarity_reranker": { "retriever": { "standard": { "query": { "term": { "topic": "ai" } } } }, "field": "text", "inference_id": "my-rerank-model", "inference_text": "Can I use generative AI to identify user intent and improve search relevance?" 
} } ], "rank_window_size": 10, "rank_constant": 1 } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77082b1ffaae9ac52dfc133fa597baa7.asciidoc000066400000000000000000000005461506101734100270000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:241 [source, python] ---- resp = client.search( index="cooking_blog", query={ "match": { "description": { "query": "fluffy pancakes", "operator": "and" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7709a48020a6cefbbe547fb944541cdb.asciidoc000066400000000000000000000011401506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:421 [source, python] ---- resp = client.bulk( index="my-bit-vectors", refresh=True, operations=[ { "index": { "_id": "1" } }, { "my_vector": [ 127, -127, 0, 1, 42 ] }, { "index": { "_id": "2" } }, { "my_vector": "8100012a7f" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7741a04e7e621c528cd72848d875776d.asciidoc000066400000000000000000000003401506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-new-data-stream.asciidoc:56 [source, python] ---- resp = client.indices.create_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77447e2966708e92f5e219d43ac3f00d.asciidoc000066400000000000000000000003261506101734100262770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:232 [source, python] ---- resp = client.tasks.list( actions="*reindex", wait_for_completion=True, timeout="10s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/774bfde8793dc4927f7cad2dd91c5b5f.asciidoc000066400000000000000000000011131506101734100267430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/multi-search-template-api.asciidoc:44 [source, python] ---- resp = client.msearch_template( index="my-index", search_templates=[ {}, { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 } }, {}, { "id": "my-other-search-template", "params": { "query_type": "match_all" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77518e8c6198acfe77c0934fd2fe65cb.asciidoc000066400000000000000000000053351506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // text-structure/apis/find-message-structure.asciidoc:93 [source, python] ---- resp = client.text_structure.find_message_structure( messages=[ "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128", "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]", "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]", "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]", "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]", "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]", "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]", "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]", "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] 
[laptop] loaded module [x-pack-analytics]", "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]", "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]", "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-painless]]", "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]", "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]", "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]", "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled", "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled", "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled", "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]", "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]", "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized", "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..." ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7752b677825523bfb0c38ad9325a6d47.asciidoc000066400000000000000000000003511506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/delete-connector-api.asciidoc:79 [source, python] ---- resp = client.connector.delete( connector_id="another-connector", delete_sync_jobs=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/776b553df0e507c96dbdbaedecaca0cc.asciidoc000066400000000000000000000004471506101734100272140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:987 [source, python] ---- resp = client.ml.infer_trained_model( model_id="model2", docs=[ { "text_field": "The movie was awesome!!" 
} ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7777326c6052fee28061e5b82540aedc.asciidoc000066400000000000000000000005561506101734100263500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-aggregation.asciidoc:402 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "grade_percentiles": { "percentiles": { "field": "grade", "missing": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7781b13b0ffff6026d10c4e3ab4a3a51.asciidoc000066400000000000000000000003661506101734100265360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // behavioral-analytics/apis/put-analytics-collection.asciidoc:55 [source, python] ---- resp = client.search_application.put_behavioral_analytics( name="my_analytics_collection", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77828fcaecc3f058c48b955928198ff6.asciidoc000066400000000000000000000014611506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/grok.asciidoc:132 [source, python] ---- resp = client.ingest.simulate( pipeline={ "description": "parse multiple patterns", "processors": [ { "grok": { "field": "message", "patterns": [ "%{FAVORITE_DOG:pet}", "%{FAVORITE_CAT:pet}" ], "pattern_definitions": { "FAVORITE_DOG": "beagle", "FAVORITE_CAT": "burmese" } } } ] }, docs=[ { "_source": { "message": "I love burmese cats!" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77b90f6787195767b6da60d8532714b4.asciidoc000066400000000000000000000010011506101734100261370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-azure-openai.asciidoc:147 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="azure_openai_embeddings", inference_config={ "service": "azureopenai", "service_settings": { "api_key": "", "resource_name": "", "deployment_id": "", "api_version": "2024-02-01" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77c099c97ea6911e2dd6e996da7dcca0.asciidoc000066400000000000000000000003571506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-hot-threads.asciidoc:78 [source, python] ---- resp = client.nodes.hot_threads() print(resp) resp1 = client.nodes.hot_threads( node_id="nodeId1,nodeId2", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/77c50f982906718ecc59aa708aed728f.asciidoc000066400000000000000000000005221506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:299 [source, python] ---- resp = client.update( index="my-index-000001", id="1", script={ "source": "ctx._source.counter += params.count", "lang": "painless", "params": { "count": 4 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77ca1a3193f75651e0bf9e8fe5227a04.asciidoc000066400000000000000000000004161506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-job-model-snapshot-upgrade-stats.asciidoc:127 [source, python] ---- resp = client.ml.get_model_snapshot_upgrade_stats( job_id="low_request_rate", snapshot_id="_all", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77cebba946fe648873a1e7375c13df41.asciidoc000066400000000000000000000005311506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:215 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.disk.watermark.low": "90%", "cluster.routing.allocation.disk.watermark.high": "95%" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77d0780c5faea4c9ec51a322a6811b3b.asciidoc000066400000000000000000000033631506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1309 [source, python] ---- resp = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": {} }, { "timestamp": "2020-04-30T14:30:17-05:00", "message": "40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:30:53-05:00", "message": "232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:12-05:00", "message": "26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:19-05:00", "message": "247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] \"GET /french/splash_inet.html HTTP/1.0\" 200 3781" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:22-05:00", "message": "247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] \"GET /images/hm_nbg.jpg HTTP/1.0\" 304 0" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:27-05:00", "message": "252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:28-05:00", "message": "not a valid apache log" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/77e3dcd87d2b2c8e6ec842462b02df1f.asciidoc000066400000000000000000000003271506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/clone-index.asciidoc:16 [source, python] ---- resp = client.indices.clone( index="my-index-000001", target="cloned-my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/78043831fd32004a82930c8ac8a1d809.asciidoc000066400000000000000000000031421506101734100261700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:1378 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "text_similarity_reranker": { "retriever": { "rrf": { "retrievers": [ { "standard": { "query": { "query_string": { "query": "(information retrieval) OR (artificial intelligence)", "default_field": "text" } } } }, { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } } ], "rank_window_size": 10, "rank_constant": 1 } }, "field": "text", "inference_id": "my-rerank-model", "inference_text": "What are the state of the art applications of AI in information retrieval?" 
} }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/78176cd6f570e1534bb40b19e6e900b6.asciidoc000066400000000000000000000002251506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/alias.asciidoc:93 [source, python] ---- resp = client.cat.aliases( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/783c4fa5351a242364210fc32496beb2.asciidoc000066400000000000000000000005731506101734100262460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/concurrency-control.asciidoc:102 [source, python] ---- resp = client.index( index="products", id="1567", if_seq_no="362", if_primary_term="2", document={ "product": "r2d2", "details": "A resourceful astromech droid", "tags": [ "droid" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7841b65a3bb880ed66cec453925a50cf.asciidoc000066400000000000000000000003611506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:380 [source, python] ---- resp = client.delete_by_query( index="my-index-000001,my-index-000002", query={ "match_all": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7846974b47a3eab1832a475663d23ad9.asciidoc000066400000000000000000000014161506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:292 [source, python] ---- resp = client.search( size=10000, query={ "match": { "user.id": "elkbee" } }, pit={ "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", "keep_alive": "1m" }, sort=[ { "@timestamp": { "order": "asc", "format": "strict_date_optional_time_nanos" } } ], search_after=[ "2021-05-20T05:30:04.832Z", 4294967298 ], track_total_hits=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7885ca9d7c61050095288eef6bc6cca9.asciidoc000066400000000000000000000010501506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/jwt-realm.asciidoc:676 [source, python] ---- resp = client.security.put_role_mapping( name="jwt8_users", refresh=True, roles=[ "user" ], rules={ "all": [ { "field": { "realm.name": "jwt8" } }, { "field": { "username": "principalname1" } } ] }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7888c509774a2abfe82ca370c43d8789.asciidoc000066400000000000000000000005631506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:4 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "cohere-embeddings", "pipeline": "cohere_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/78c4035e4fbf6851140660f6ed2a1fa5.asciidoc000066400000000000000000000002171506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/stats.asciidoc:121 [source, python] ---- resp = client.indices.stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/78c96113ae4ed0054e581b17542528a7.asciidoc000066400000000000000000000005401506101734100261760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:409 [source, python] ---- resp = client.reindex( source={ 
"index": "source", "query": { "match": { "company": "cat" } } }, dest={ "index": "dest", "routing": "=cat" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/78e20b4cff470ed7357de1fd74bcfeb7.asciidoc000066400000000000000000000006701506101734100270160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:137 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "remove": { "index": "index1", "alias": "logs-non-existing" } }, { "add": { "index": "index2", "alias": "logs-non-existing" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/790684b45bef2bb848ea932f0fd0cfbd.asciidoc000066400000000000000000000017101506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/intervals-query.asciidoc:539 [source, python] ---- resp = client.search( query={ "intervals": { "my_text": { "all_of": { "ordered": False, "max_gaps": 1, "intervals": [ { "match": { "query": "my favorite food", "max_gaps": 0, "ordered": True } }, { "match": { "query": "cold porridge", "max_gaps": 4, "ordered": True } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/790c49fe2ec638e5e8db51a9236bba35.asciidoc000066400000000000000000000013751506101734100265770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:133 [source, python] ---- resp = client.search( index="my_locations,my_geoshapes", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_bounding_box": { "pin.location": { "top_left": { "lat": 40.73, "lon": -74.1 }, "bottom_right": { "lat": 40.01, "lon": -71.12 } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7965d4dbafdc7ca9e1ee6759939dd2e8.asciidoc000066400000000000000000000037031506101734100267630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/how-watcher-works.asciidoc:50 [source, python] ---- resp = client.watcher.put_watch( id="log_errors", metadata={ "color": "red" }, trigger={ "schedule": { "interval": "5m" } }, input={ "search": { "request": { "indices": "log-events", "body": { "size": 0, "query": { "match": { "status": "error" } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 5 } } }, transform={ "search": { "request": { "indices": "log-events", "body": { "query": { "match": { "status": "error" } } } } } }, actions={ "my_webhook": { "webhook": { "method": "POST", "host": "mylisteninghost", "port": 9200, "path": "/{{watch_id}}", "body": "Encountered {{ctx.payload.hits.total}} errors" } }, "email_administrator": { "email": { "to": "sys.admino@host.domain", "subject": "Encountered {{ctx.payload.hits.total}} errors", "body": "Too many error in the system, see attached data", "attachments": { "attached_data": { "data": { "format": "json" } } }, "priority": "high" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79b43a1bf02fb5b38f54b8d5aa5dab53.asciidoc000066400000000000000000000006371506101734100267130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/autodatehistogram-aggregation.asciidoc:43 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "auto_date_histogram": { "field": "date", "buckets": 5, "format": "yyyy-MM-dd" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79bf91ace935d095d8e44b3ef3fe2efa.asciidoc000066400000000000000000000005751506101734100270300ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/diagnose-unassigned-shards.asciidoc:269 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", flat_settings=True, include_defaults=True, ) print(resp) resp1 = client.cluster.get_settings( flat_settings=True, include_defaults=True, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/79cb85efd5e4c435e73b253cb9feabb1.asciidoc000066400000000000000000000012451506101734100270100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/dissect-syntax.asciidoc:250 [source, python] ---- resp = client.search( index="my-index", runtime_mappings={ "http.response": { "type": "long", "script": "\n String response=dissect('%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{response} %{size}').extract(doc[\"message\"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n " } }, query={ "match": { "http.response": "304" } }, fields=[ "http.response" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79d206a528be704050a437adce2496dd.asciidoc000066400000000000000000000010641506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:629 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="my-elastic-rerank", inference_config={ "service": "elasticsearch", "service_settings": { "model_id": ".rerank-v1", "num_threads": 1, "adaptive_allocations": { "enabled": True, "min_number_of_allocations": 1, "max_number_of_allocations": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79e053326a3a8eec828523a035393f66.asciidoc000066400000000000000000000003531506101734100262070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:354 [source, python] ---- resp = client.delete( index=".ds-my-data-stream-2099.03.08-000003", id="bfspvnIBr7VVZlfp2lqX", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79e8bbbd6c440a21b0b4260c8cb1a61c.asciidoc000066400000000000000000000004121506101734100266030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:207 [source, python] ---- resp = client.index( index="example", document={ "location": "LINESTRING (-77.03653 38.897676, -77.009051 38.889939)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79f33e05b203eb46eef7958fbc95ef77.asciidoc000066400000000000000000000003371506101734100266160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:93 [source, python] ---- resp = client.ccr.get_auto_follow_pattern( name="my_auto_follow_pattern", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/79feb4a0c0a21b7015a52f9736cd4683.asciidoc000066400000000000000000000027101506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-inner-hits.asciidoc:324 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "comments": { "type": "nested", "properties": { "votes": { "type": "nested" } } } } }, ) print(resp) resp1 = client.index( index="test", id="1", refresh=True, document={ "title": "Test title", "comments": [ { "author": "kimchy", "text": "comment text", "votes": [] }, { "author": "nik9000", "text": "words words words", "votes": [ { "value": 1, "voter": "kimchy" }, { 
"value": -1, "voter": "other" } ] } ] }, ) print(resp1) resp2 = client.search( index="test", query={ "nested": { "path": "comments.votes", "query": { "match": { "comments.votes.voter": "kimchy" } }, "inner_hits": {} } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/79ff4e7fa5c004226d05d7e2bfb5dc1e.asciidoc000066400000000000000000000022561506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/passthrough.asciidoc:134 [source, python] ---- resp = client.indices.put_index_template( name="my-metrics", index_patterns=[ "metrics-mymetrics-*" ], priority=200, data_stream={}, template={ "settings": { "index.mode": "time_series" }, "mappings": { "properties": { "attributes": { "type": "passthrough", "priority": 10, "time_series_dimension": True, "properties": { "host.name": { "type": "keyword" } } }, "cpu": { "type": "integer", "time_series_metric": "counter" } } } }, ) print(resp) resp1 = client.index( index="metrics-mymetrics-test", document={ "@timestamp": "2020-01-01T00:00:00.000Z", "attributes": { "host.name": "foo", "zone": "bar" }, "cpu": 10 }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/7a0c633a67244e9703344d036e584d95.asciidoc000066400000000000000000000003611506101734100261250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/enable-user-profile.asciidoc:60 [source, python] ---- resp = client.security.enable_user_profile( uid="u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a0eb2222fe282d3aab66e12feff2a3b.asciidoc000066400000000000000000000021041506101734100267470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:832 [source, python] ---- resp = client.index( index="ip_location", refresh=True, document={ "ip": "192.168.1.1", "country": "Canada", "city": "Montreal" }, ) print(resp) resp1 = client.index( index="logs", id="1", refresh=True, document={ "host": "192.168.1.1", "message": "the first message" }, ) print(resp1) resp2 = client.index( index="logs", id="2", refresh=True, document={ "host": "192.168.1.2", "message": "the second message" }, ) print(resp2) resp3 = client.search( index="logs", runtime_mappings={ "location": { "type": "lookup", "target_index": "ip_location", "input_field": "host", "target_field": "ip", "fetch_fields": [ "country", "city" ] } }, fields=[ "host", "message", "location" ], source=False, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/7a23a385a63c87cab58fd494870450fd.asciidoc000066400000000000000000000010521506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:181 [source, python] ---- resp = client.security.put_role_mapping( name="mapping4", roles=[ "superuser" ], enabled=True, rules={ "any": [ { "field": { "username": "esadmin" } }, { "field": { "groups": "cn=admins,dc=example,dc=com" } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a27336a61284d079f3cc3994cf927d1.asciidoc000066400000000000000000000032061506101734100262760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/dls-overview.asciidoc:283 [source, python] ---- resp = client.security.create_api_key( name="my-api-key", role_descriptors={ "role-source1": { "indices": [ { "names": [ "source1" ], "privileges": [ "read" ], "query": { "template": { "params": { "access_control": [ "example.user@example.com", 
"source1-user-group" ] } }, "source": "..." } } ] }, "role-source2": { "indices": [ { "names": [ "source2" ], "privileges": [ "read" ], "query": { "template": { "params": { "access_control": [ "example.user@example.com", "source2-user-group" ] } }, "source": "..." } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a2b9a7b2b6553a48bd4db60a939c0fc.asciidoc000066400000000000000000000006461506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:331 [source, python] ---- resp = client.index( index="test_index", id="1", refresh=True, document={ "query": { "match": { "body": { "query": "miss bicycl", "analyzer": "whitespace" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a2fdfd7b0553d63440af7598f9ad867.asciidoc000066400000000000000000000007211506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/semantic-text.asciidoc:63 [source, python] ---- resp = client.indices.create( index="my-index-000003", mappings={ "properties": { "inference_field": { "type": "semantic_text", "inference_id": "my-elser-endpoint-for-ingest", "search_inference_id": "my-elser-endpoint-for-search" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a3a7fbd81e5050b42e8c1eca26c7c1d.asciidoc000066400000000000000000000003531506101734100267020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/async-search.asciidoc:340 [source, python] ---- resp = client.async_search.delete( id="FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a8de5606f283f4ef171b015eef6befa.asciidoc000066400000000000000000000003011506101734100267170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/stats.asciidoc:149 [source, python] ---- resp = client.indices.stats( metric="search", groups="group1,group2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7a987cd13383bdc990155d7bd5fb221e.asciidoc000066400000000000000000000010661506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:114 [source, python] ---- resp = client.security.put_role( name="test_role5", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "grant": [ "*" ], "except": [ "customer.handle" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7ab968a61bb0783f563dd6d29b253901.asciidoc000066400000000000000000000027541506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:379 [source, python] ---- resp = client.indices.create( index="catalan_example", settings={ "analysis": { "filter": { "catalan_elision": { "type": "elision", "articles": [ "d", "l", "m", "n", "s", "t" ], "articles_case": True }, "catalan_stop": { "type": "stop", "stopwords": "_catalan_" }, "catalan_keywords": { "type": "keyword_marker", "keywords": [ "example" ] }, "catalan_stemmer": { "type": "stemmer", "language": "catalan" } }, "analyzer": { "rebuilt_catalan": { "tokenizer": "standard", "filter": [ "catalan_elision", "lowercase", "catalan_stop", "catalan_keywords", "catalan_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7ae434b3667c589a8e70fe560f4ee3f9.asciidoc000066400000000000000000000003161506101734100265300ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:18 [source, python] ---- resp = client.update_by_query( index="my-index-000001", conflicts="proceed", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7af1f62b0cf496cbf593d83d30b472cc.asciidoc000066400000000000000000000014231506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:226 [source, python] ---- resp = client.security.create_api_key( name="music-connector", role_descriptors={ "music-connector-role": { "cluster": [ "monitor", "manage_connector" ], "indices": [ { "names": [ "music", ".search-acl-filter-music", ".elastic-connectors*" ], "privileges": [ "all" ], "allow_restricted_indices": False } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7b3e913368e96eaa6e22e0d03c81310e.asciidoc000066400000000000000000000003601506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/store.asciidoc:30 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.store.type": "hybridfs" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7b3f255d28ce5b46d111402b96b41351.asciidoc000066400000000000000000000005701506101734100262430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/run-as-privilege.asciidoc:170 [source, python] ---- resp = client.security.put_user( username="admin_user", refresh=True, password="l0ng-r4nd0m-p@ssw0rd", roles=[ "my_admin_role" ], full_name="Eirian Zola", metadata={ "intelligence": 7 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7b5c231526846f2f7b98d78f3656ae6a.asciidoc000066400000000000000000000003511506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:364 [source, python] ---- resp = client.update( index="test", id="1", doc={ "name": "new_name" }, doc_as_upsert=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7b7a828c21c856a3cbc41fd2f85108bf.asciidoc000066400000000000000000000006111506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:483 [source, python] ---- resp = client.indices.refresh() print(resp) resp1 = client.search( index="my-index-000001", size="0", filter_path="hits.total", query={ "range": { "http.response.bytes": { "lt": 2000000 } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/7b864d61767ab283cfd5f9b9ba784b1f.asciidoc000066400000000000000000000002771506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:207 [source, python] ---- resp = client.security.get_api_key( name="my-api-key", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7b908b1189f076942de8cd497ff1fa59.asciidoc000066400000000000000000000006321506101734100264560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:216 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "quick brown fox", "type": "most_fields", "fields": [ "title", "title.original", "title.shingles" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc000066400000000000000000000004151506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
python-elasticsearch-9.1.1/docs/examples/7b9dfe5857bde1bd8483ea3241656714.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/whitespace-tokenizer.asciidoc:14

[source, python]
----
resp = client.indices.analyze(
    tokenizer="whitespace",
    text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7ba29f0be2297b54a640b0a17d7ef5ca.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/apis/delete-ip-location-database.asciidoc:16

[source, python]
----
resp = client.ingest.delete_ip_location_database(
    id="my-database-id",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7bdc283b96c7a965fae23013647b8578.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/semantic-text.asciidoc:220

[source, python]
----
resp = client.indices.create(
    index="test-index",
    mappings={
        "properties": {
            "source_field": {"type": "text", "copy_to": "infer_field"},
            "infer_field": {
                "type": "semantic_text",
                "inference_id": ".elser-2-elasticsearch"
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7c24d4bef3f2045407fbf1b95c5416f9.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/range.asciidoc:34

[source, python]
----
resp = client.indices.create(
    index="range_index",
    settings={"number_of_shards": 2},
    mappings={
        "properties": {
            "expected_attendees": {"type": "integer_range"},
            "time_frame": {
                "type": "date_range",
                "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis"
            }
        }
    },
)
print(resp)

resp1 = client.index(
    index="range_index",
    id="1",
    refresh=True,
    document={
        "expected_attendees": {"gte": 10, "lt": 20},
        "time_frame": {"gte": "2015-10-31 12:00:00", "lte": "2015-11-01"}
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7c3414279d47e9c29105d061ed316ef8.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/suggesters/completion-suggest.asciidoc:104

[source, python]
----
resp = client.index(
    index="music",
    id="1",
    refresh=True,
    document={"suggest": ["Nevermind", "Nirvana"]},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7c4551abbb7a5f3841109f7664bc4aad.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/pattern-analyzer.asciidoc:267

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "camel": {
                    "type": "pattern",
                    "pattern": "([^\\p{L}\\d]+)|(?<=\\D)(?=\\d)|(?<=\\d)(?=\\D)|(?<=[\\p{L}&&[^\\p{Lu}]])(?=\\p{Lu})|(?<=\\p{Lu})(?=\\p{Lu}[\\p{L}&&[^\\p{Lu}]])"
                }
            }
        }
    },
)
print(resp)

resp1 = client.indices.analyze(
    index="my-index-000001",
    analyzer="camel",
    text="MooseX::FTPClass2_beta",
)
print(resp1)
----
"2001:db8:a4f8:112a:6001:0:12:7f10" }, { "index": { "_id": 1 } }, { "ipv4": "192.168.1.12", "ipv6": "2001:db8:a4f8:112a:6001:0:12:7f12" }, { "index": { "_id": 2 } }, { "ipv4": "192.168.1.33", "ipv6": "2001:db8:a4f8:112a:6001:0:12:7f33" }, { "index": { "_id": 3 } }, { "ipv4": "192.168.1.10", "ipv6": "2001:db8:a4f8:112a:6001:0:12:7f10" }, { "index": { "_id": 4 } }, { "ipv4": "192.168.2.41", "ipv6": "2001:db8:a4f8:112c:6001:0:12:7f41" }, { "index": { "_id": 5 } }, { "ipv4": "192.168.2.10", "ipv6": "2001:db8:a4f8:112c:6001:0:12:7f10" }, { "index": { "_id": 6 } }, { "ipv4": "192.168.2.23", "ipv6": "2001:db8:a4f8:112c:6001:0:12:7f23" }, { "index": { "_id": 7 } }, { "ipv4": "192.168.3.201", "ipv6": "2001:db8:a4f8:114f:6001:0:12:7201" }, { "index": { "_id": 8 } }, { "ipv4": "192.168.3.107", "ipv6": "2001:db8:a4f8:114f:6001:0:12:7307" } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/7c5e41a7c0075d87b8f8348a6efa990c.asciidoc000066400000000000000000000006451506101734100265250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/managing.asciidoc:102 [source, python] ---- resp = client.ccr.pause_follow( index="follower_index", ) print(resp) resp1 = client.indices.close( index="follower_index", ) print(resp1) resp2 = client.ccr.follow( index="follower_index", wait_for_active_shards="1", remote_cluster="remote_cluster", leader_index="leader_index", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/7c9076f3e93a8f61189783c736bf6082.asciidoc000066400000000000000000000007461506101734100262440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:43 [source, python] ---- resp = client.security.put_role( name="test_role2", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "grant": [ "event_*" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7ca224d1a7de20a15c008e1b9dbda377.asciidoc000066400000000000000000000004661506101734100266160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:807 [source, python] ---- resp = client.search( aggs={ "tags": { "terms": { "field": "tags", "missing": "N/A" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7cd23457e220c8b64c5b0041d2acc27a.asciidoc000066400000000000000000000003021506101734100264420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/advanced-configuration.asciidoc:123 [source, python] ---- resp = client.nodes.info( node_id="_all", metric="jvm", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7cd3d8388c51a9f6ee3f730cdaddbb89.asciidoc000066400000000000000000000004251506101734100270200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/update-settings.asciidoc:97 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "refresh_interval": None } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/7d1cbcb545aa19260073dbb2b7ef5074.asciidoc000066400000000000000000000015421506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:658 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "size": 2, "sources": [ { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } }, { "product": { "terms": { "field": "product" } } } ] } } }, ) 
python-elasticsearch-9.1.1/docs/examples/7d3a74fe0ba3fe95d1c3275365ff9315.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/flattened.asciidoc:374

[source, python]
----
resp = client.indices.create(
    index="idx",
    settings={"index": {"mapping": {"source": {"mode": "synthetic"}}}},
    mappings={"properties": {"flattened": {"type": "flattened"}}},
)
print(resp)

resp1 = client.index(
    index="idx",
    id="1",
    document={
        "flattened": {
            "field": [
                {"id": 1, "name": "foo"},
                {"id": 2, "name": "bar"},
                {"id": 3, "name": "baz"}
            ]
        }
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7d880157a95f64ad339225d4af71c2de.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/suggest-user-profile.asciidoc:105

[source, python]
----
resp = client.security.suggest_user_profiles(
    name="jack",
    hint={
        "uids": [
            "u_8RKO7AKfEbSiIHZkZZ2LJy2MUSDPWDr3tMI_CkIGApU_0",
            "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0"
        ],
        "labels": {"direction": ["north", "east"]}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7d9eba51a269571ae62fb8b442b373ce.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc:25

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_analyzer": {
                    "tokenizer": "standard",
                    "filter": ["lowercase", "custom_stems", "porter_stem"]
                }
            },
            "filter": {
                "custom_stems": {
                    "type": "stemmer_override",
                    "rules_path": "analysis/stemmer_override.txt"
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7dabae9b37d2cbd724f2a069be9e753b.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/reset-job.asciidoc:79

[source, python]
----
resp = client.ml.reset_job(
    job_id="total-requests",
    wait_for_completion=False,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7daff6b7e668ab8a762b8ab5dff7a167.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/sparse-vector-query.asciidoc:260

[source, python]
----
resp = client.search(
    index="my-index",
    query={
        "sparse_vector": {
            "field": "ml.tokens",
            "inference_id": "my-elser-model",
            "query": "How is the weather in Jamaica?",
            "prune": True,
            "pruning_config": {
                "tokens_freq_ratio_threshold": 5,
                "tokens_weight_threshold": 0.4,
                "only_score_pruned_tokens": False
            }
        }
    },
    rescore={
        "window_size": 100,
        "query": {
            "rescore_query": {
                "sparse_vector": {
                    "field": "ml.tokens",
                    "inference_id": "my-elser-model",
                    "query": "How is the weather in Jamaica?",
                    "prune": True,
                    "pruning_config": {
                        "tokens_freq_ratio_threshold": 5,
                        "tokens_weight_threshold": 0.4,
                        "only_score_pruned_tokens": True
                    }
                }
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/7db09cab02d71f3a10d91071216d80fc.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/ingest-vectors.asciidoc:108

[source, python]
----
resp = client.search(
    index="amazon-reviews",
    retriever={
        "knn": {
            "field": "review_vector",
            "query_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
            "k": 2,
            "num_candidates": 5
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7db798942cf2d334456e30ef5fcb801b.asciidoc
// This file is autogenerated, DO NOT EDIT
// quickstart/full-text-filtering-tutorial.asciidoc:161

[source, python]
----
resp = client.search(
    index="cooking_blog",
    query={"match": {"description": {"query": "fluffy pancakes"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7dc6c0a6386289ac6a34105e839ced55.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/rate-aggregation.asciidoc:33

[source, python]
----
resp = client.search(
    index="sales",
    size=0,
    aggs={
        "by_date": {
            "date_histogram": {"field": "date", "calendar_interval": "month"},
            "aggs": {"my_rate": {"rate": {"unit": "year"}}}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7dc82f7d36686fd57a47e34cbda39a4e.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc:47

[source, python]
----
resp = client.indices.analyze(
    tokenizer="whitespace",
    filter=["delimited_payload"],
    text="the|0 brown|10 fox|5 is|0 quick|10",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7dd0d9cc6c5982a2c003d301e90feeba.asciidoc
// This file is autogenerated, DO NOT EDIT
// quickstart/aggs-tutorial.asciidoc:824

[source, python]
----
resp = client.search(
    index="kibana_sample_data_ecommerce",
    size=0,
    aggs={
        "daily_sales": {
            "date_histogram": {
                "field": "order_date",
                "calendar_interval": "day",
                "format": "yyyy-MM-dd"
            },
            "aggs": {
                "revenue": {"sum": {"field": "taxful_total_price"}},
                "unique_customers": {"cardinality": {"field": "customer_id"}},
                "avg_basket_size": {"avg": {"field": "total_quantity"}}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7dd481337e40f16185f3baa3fc2cce15.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/fields/routing-field.asciidoc:38

[source, python]
----
resp = client.search(
    index="my-index-000001",
    query={"terms": {"_routing": ["user1"]}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7de7e647c1c9cbe0a1df0d104fc0a947.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/repository-s3.asciidoc:23

[source, python]
----
resp = client.snapshot.create_repository(
    name="my_s3_repository",
    repository={
        "type": "s3",
        "settings": {"bucket": "my-bucket"}
    },
)
print(resp)
----
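A hand-written follow-up sketch: once registered, the repository can be checked with the verify-repository API to confirm all nodes can reach the bucket.

[source, python]
----
# Hand-written sketch: verify the newly registered S3 repository.
resp = client.snapshot.verify_repository(name="my_s3_repository")
print(resp)
----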
python-elasticsearch-9.1.1/docs/examples/7dedb148ff74912de81b8f8275f0d7f3.asciidoc
// This file is autogenerated, DO NOT EDIT
// how-to/search-speed.asciidoc:174

[source, python]
----
resp = client.search(
    index="index",
    aggs={"price_ranges": {"terms": {"field": "price_range"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7df191cc7f814e410a4ac7261065e6ef.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/update-by-query.asciidoc:474

[source, python]
----
resp = client.tasks.list(
    detailed=True,
    actions="*byquery",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e126e2751311db60cfcbb22c9c41caa.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat/shards.asciidoc:395

[source, python]
----
resp = client.cat.shards()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e16d21cba51eb8960835b63a1a7266a.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/dynamic/field-mapping.asciidoc:103

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={"dynamic_date_formats": ["MM/dd/yyyy"]},
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={"create_date": "09/25/2015"},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7e20b6e15e409b02a5e452ceddf1e1e0.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/composite-aggregation.asciidoc:579

[source, python]
----
resp = client.search(
    size=0,
    aggs={
        "my_buckets": {
            "composite": {
                "sources": [
                    {
                        "date": {
                            "date_histogram": {
                                "field": "timestamp",
                                "calendar_interval": "1d",
                                "order": "desc"
                            }
                        }
                    },
                    {"product": {"terms": {"field": "product", "order": "asc"}}}
                ]
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e2b9bf4ab353c377b761101775edf93.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/set-up-tsds.asciidoc:220

[source, python]
----
resp = client.bulk(
    index="metrics-weather_sensors-dev",
    operations=[
        {"create": {}},
        {
            "@timestamp": "2099-05-06T16:21:15.000Z",
            "sensor_id": "HAL-000001",
            "location": "plains",
            "temperature": 26.7,
            "humidity": 49.9
        },
        {"create": {}},
        {
            "@timestamp": "2099-05-06T16:25:42.000Z",
            "sensor_id": "SYKENET-000001",
            "location": "swamp",
            "temperature": 32.4,
            "humidity": 88.9
        }
    ],
)
print(resp)

resp1 = client.index(
    index="metrics-weather_sensors-dev",
    document={
        "@timestamp": "2099-05-06T16:21:15.000Z",
        "sensor_id": "SYKENET-000001",
        "location": "swamp",
        "temperature": 32.4,
        "humidity": 88.9
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7e484b8b41f9dbc2bcf1f340db197c1d.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/reindex.asciidoc:31

[source, python]
----
resp = client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e48648ca27024831c60b455e836c496.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/pinned-query.asciidoc:55

[source, python]
----
resp = client.search(
    query={
        "pinned": {
            "docs": [
                {"_index": "my-index-000001", "_id": "1"},
                {"_id": "4"}
            ],
            "organic": {"match": {"description": "iphone"}}
        }
    },
)
print(resp)
----
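A hand-written variant for illustration: when every pinned document lives in the index being searched, the pinned query also accepts a plain `ids` list instead of `docs`.

[source, python]
----
# Hand-written sketch: the ids shorthand of the pinned query.
# Assumes both documents exist in the index being searched.
resp = client.search(
    query={
        "pinned": {
            "ids": ["1", "4"],
            "organic": {"match": {"description": "iphone"}}
        }
    },
)
print(resp)
----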
python-elasticsearch-9.1.1/docs/examples/7e49705769c42895fb7b1e2ca028ff47.asciidoc
// This file is autogenerated, DO NOT EDIT
// security/securing-communications/update-tls-certificates.asciidoc:713

[source, python]
----
resp = client.cat.nodes()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e4cb3de3e3c75646b60f9f81ddc59cc.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc:49

[source, python]
----
resp = client.ml.clear_trained_model_deployment_cache(
    model_id="elastic__distilbert-base-uncased-finetuned-conll03-english",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e5faa551f2c95ffd627da352563d450.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/create-role-mappings.asciidoc:275

[source, python]
----
resp = client.security.put_role_mapping(
    name="mapping6",
    roles=["example-user"],
    enabled=True,
    rules={"field": {"dn": "*,ou=subtree,dc=example,dc=com"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e74d1a54e816e8f40cfdaa01b070788.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/rrf.asciidoc:250

[source, python]
----
resp = client.search(
    index="example-index",
    retriever={
        "rrf": {
            "retrievers": [
                {"standard": {"query": {"term": {"text": "rrf"}}}},
                {
                    "knn": {
                        "field": "vector",
                        "query_vector": [3],
                        "k": 5,
                        "num_candidates": 5
                    }
                }
            ],
            "rank_window_size": 5,
            "rank_constant": 1
        }
    },
    size=3,
    aggs={"int_count": {"terms": {"field": "integer"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7e77509ab646276ff78f58bb38bec8dd.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-rules/apis/delete-query-ruleset.asciidoc:75

[source, python]
----
resp = client.query_rules.delete_ruleset(
    ruleset_id="my-ruleset",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7ebeb6cf26be5b5ecdfd408bd0fc3215.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/profile.asciidoc:1248

[source, python]
----
resp = client.indices.create(
    index="my-knn-index",
    mappings={
        "properties": {
            "my-vector": {
                "type": "dense_vector",
                "dims": 3,
                "index": True,
                "similarity": "l2_norm"
            }
        }
    },
)
print(resp)

resp1 = client.bulk(
    index="my-knn-index",
    refresh=True,
    operations=[
        {"index": {"_id": "1"}},
        {"my-vector": [1, 5, -20]},
        {"index": {"_id": "2"}},
        {"my-vector": [42, 8, -15]},
        {"index": {"_id": "3"}},
        {"my-vector": [15, 11, 23]}
    ],
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7ebfb30b3ece855c1b783d9210939469.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/flush-job.asciidoc:108

[source, python]
----
resp = client.ml.flush_job(
    job_id="total-requests",
    advance_time="1514804400000",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7ed26b34ce90192a1563dcddf0e45dc0.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/pipeline/derivative-aggregation.asciidoc:43

[source, python]
----
resp = client.search(
    index="sales",
    size=0,
    aggs={
        "sales_per_month": {
            "date_histogram": {"field": "date", "calendar_interval": "month"},
            "aggs": {
                "sales": {"sum": {"field": "price"}},
                "sales_deriv": {"derivative": {"buckets_path": "sales"}}
            }
        }
    },
)
print(resp)
----
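A hand-written sketch of the chaining the derivative docs describe: a second-order derivative is computed by pointing another derivative at the first one.

[source, python]
----
# Hand-written sketch: second-order derivative by chaining pipeline aggregations.
resp = client.search(
    index="sales",
    size=0,
    aggs={
        "sales_per_month": {
            "date_histogram": {"field": "date", "calendar_interval": "month"},
            "aggs": {
                "sales": {"sum": {"field": "price"}},
                "sales_deriv": {"derivative": {"buckets_path": "sales"}},
                # derivative of the derivative above
                "sales_2nd_deriv": {"derivative": {"buckets_path": "sales_deriv"}}
            }
        }
    },
)
print(resp)
----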
python-elasticsearch-9.1.1/docs/examples/7f1fade93225f8cf6000b93334d76ce4.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/processors/ip-location.asciidoc:188

[source, python]
----
resp = client.ingest.put_pipeline(
    id="ip_location",
    description="Add ip geolocation info",
    processors=[
        {"ip_location": {"field": "ip"}}
    ],
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="my_id",
    pipeline="ip_location",
    document={"ip": "80.231.5.0"},
)
print(resp1)

resp2 = client.get(
    index="my-index-000001",
    id="my_id",
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/7f2d511cb64743c006225e5933a14bb4.asciidoc
// This file is autogenerated, DO NOT EDIT
// esql/esql-across-clusters.asciidoc:69

[source, python]
----
resp = client.security.put_role(
    name="remote1",
    cluster=["cross_cluster_search"],
    indices=[
        {
            "names": [""],
            "privileges": ["read"]
        }
    ],
    remote_indices=[
        {
            "names": ["logs-*"],
            "privileges": ["read", "read_cross_cluster"],
            "clusters": ["my_remote_cluster"]
        }
    ],
    remote_cluster=[
        {
            "privileges": ["monitor_enrich"],
            "clusters": ["my_remote_cluster"]
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7f37031fb40b68a61255b7c71d7eed0b.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/watcher/execute-watch.asciidoc:305

[source, python]
----
resp = client.watcher.execute_watch(
    id="my_watch",
    action_modes={
        "action1": "force_simulate",
        "action2": "skip"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7f514e9e785e4323d16396359cb184f2.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/range.asciidoc:195

[source, python]
----
resp = client.indices.put_mapping(
    index="range_index",
    properties={"ip_allowlist": {"type": "ip_range"}},
)
print(resp)

resp1 = client.index(
    index="range_index",
    id="2",
    document={"ip_allowlist": "192.168.0.0/16"},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7f56755fb6c42f7e6203339a6d0cb6e6.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/match-query.asciidoc:283

[source, python]
----
resp = client.search(
    query={
        "match": {
            "message": {
                "query": "ny city",
                "auto_generate_synonyms_phrase_query": False
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7f92ddd4e940a37d6227c43fd279c8f5.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:759

[source, python]
----
resp = client.search(
    index="my-index-000001",
    size=1,
    query={"match": {"client_ip": "211.11.9.0"}},
    fields=["*"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7fb921376cbf66bf9f381bcdd62030ba.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/apis/get-script-contexts-api.asciidoc:16

[source, python]
----
resp = client.get_script_context()
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/7fbebf0fc9b4a402917a4723ad547c6a.asciidoc
// This file is autogenerated, DO NOT EDIT
// tab-widgets/troubleshooting/snapshot/corrupt-repository.asciidoc:147

[source, python]
----
resp = client.snapshot.create_repository(
    name="my-repo",
    repository={
        "type": "s3",
        "settings": {
            "bucket": "repo-bucket",
            "client": "elastic-internal-71bcd3",
            "base_path": "myrepo"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7fd2532f4e12e3efbc58af195060b31e.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/charfilters/pattern-replace-charfilter.asciidoc:205

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    refresh=True,
    document={"text": "The fooBarBaz method"},
)
print(resp)

resp1 = client.search(
    index="my-index-000001",
    query={"match": {"text": "bar"}},
    highlight={"fields": {"text": {}}},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/7fd5883564d183603e60b37d286ac7e2.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/delete-expired-data.asciidoc:70

[source, python]
----
resp = client.ml.delete_expired_data(
    timeout="1h",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7fde3ff91c4a2e7080444af37d5cd287.asciidoc
// This file is autogenerated, DO NOT EDIT
// esql/esql-rest.asciidoc:289

[source, python]
----
resp = client.esql.query(
    query="\n    FROM library\n    | EVAL year = DATE_EXTRACT(\"year\", release_date)\n    | WHERE page_count > ?page_count AND author == ?author\n    | STATS count = COUNT(*) by year\n    | WHERE count > ?count\n    | LIMIT 5\n    ",
    params=[
        {"page_count": 300},
        {"author": "Frank Herbert"},
        {"count": 0}
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7fe2179705304af5e87eb382dca6235a.asciidoc
// This file is autogenerated, DO NOT EDIT
// ilm/set-up-lifecycle-policy.asciidoc:318

[source, python]
----
resp = client.indices.open(
    index="logs-my_app-default",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7fe9f0a583e079f7fc6fd64d12b6e9e5.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/sum-aggregation.asciidoc:54

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    runtime_mappings={
        "price.weighted": {
            "type": "double",
            "script": "\n      double price = doc['price'].value;\n      if (doc['promoted'].value) {\n        price *= 0.8;\n      }\n      emit(price);\n    "
        }
    },
    query={"constant_score": {"filter": {"match": {"type": "hat"}}}},
    aggs={"hat_prices": {"sum": {"field": "price.weighted"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/7fef68840761c6982c14ad7af96caf37.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/nested.asciidoc:24

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    document={
        "group": "fans",
        "user": [
            {"first": "John", "last": "Smith"},
            {"first": "Alice", "last": "White"}
        ]
    },
)
print(resp)
----
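A hand-written sketch, not part of the autogenerated examples: if `user` were mapped as a `nested` field (an assumption here, not shown above), each object would be queried independently with a nested query.

[source, python]
----
# Hand-written sketch: query nested user objects as independent documents.
# Assumes "user" was mapped with "type": "nested" before indexing.
resp = client.search(
    index="my-index-000001",
    query={
        "nested": {
            "path": "user",
            "query": {
                "bool": {
                    "must": [
                        {"match": {"user.first": "Alice"}},
                        {"match": {"user.last": "White"}}
                    ]
                }
            }
        }
    },
)
print(resp)
----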
python-elasticsearch-9.1.1/docs/examples/7ff4124df0541ee2496034004f4146d4.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/params/eager-global-ordinals.asciidoc:74

[source, python]
----
resp = client.indices.put_mapping(
    index="my-index-000001",
    properties={
        "tags": {
            "type": "keyword",
            "eager_global_ordinals": False
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/800861c15bb33ca01a46fb97dde7537a.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/get-filter.asciidoc:72

[source, python]
----
resp = client.ml.get_filters(
    filter_id="safe_domains",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/80135e8c644e34cc70ce8a4e7915d1a2.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/processors/attachment.asciidoc:315

[source, python]
----
resp = client.ingest.put_pipeline(
    id="attachment",
    description="Extract attachment information",
    processors=[
        {
            "attachment": {
                "field": "data",
                "indexed_chars": 11,
                "indexed_chars_field": "max_size",
                "remove_binary": True
            }
        }
    ],
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="my_id_2",
    pipeline="attachment",
    document={
        "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=",
        "max_size": 5
    },
)
print(resp1)

resp2 = client.get(
    index="my-index-000001",
    id="my_id_2",
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/803bbc14fbec0e49dfed9fab49c8a7f8.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/term-query.asciidoc:99

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={"properties": {"full_text": {"type": "text"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8051766cadded0892290bc2cc06e145c.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/watcher/ack-watch.asciidoc:251

[source, python]
----
resp = client.watcher.ack_watch(
    watch_id="my_watch",
    action_id="action1,action2",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/805f5550b90e75aa5cc82b90d8c6c242.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/significanttext-aggregation.asciidoc:221

[source, python]
----
resp = client.search(
    index="news",
    query={"match": {"content": "elasticsearch"}},
    aggs={
        "sample": {
            "sampler": {"shard_size": 100},
            "aggs": {
                "keywords": {
                    "significant_text": {
                        "field": "content",
                        "filter_duplicate_text": True
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/807c0c9763f8c1114b3c8278c2a0cb56.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/intervals-query.asciidoc:28

[source, python]
----
resp = client.search(
    query={
        "intervals": {
            "my_text": {
                "all_of": {
                    "ordered": True,
                    "intervals": [
                        {
                            "match": {
                                "query": "my favorite food",
                                "max_gaps": 0,
                                "ordered": True
                            }
                        },
                        {
                            "any_of": {
                                "intervals": [
                                    {"match": {"query": "hot water"}},
                                    {"match": {"query": "cold porridge"}}
                                ]
                            }
                        }
                    ]
                }
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/8080cd9e24a8785728ce7c372ec4acf1.asciidoc
// This file is autogenerated, DO NOT EDIT
// watcher/how-watcher-works.asciidoc:159

[source, python]
----
resp = client.perform_request(
    "PUT",
    "/_watcher/settings",
    headers={"Content-Type": "application/json"},
    body={"index.routing.allocation.include.role": "watcher"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/808f4db1e2361be77dd6816c1f818139.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/shard-stores.asciidoc:19

[source, python]
----
resp = client.indices.shard_stores(
    index="my-index-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/80dbaf28d1976dc00de3fe2018067e81.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-management/migrate-index-allocation-filters.asciidoc:132

[source, python]
----
resp = client.indices.delete_template(
    name=".cloud-hot-warm-allocation-0",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/80dd7f5882c59b9c1c90e8351937441f.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/bulk-update-api-keys.asciidoc:182

[source, python]
----
resp = client.security.bulk_update_api_keys(
    ids=["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
    role_descriptors={
        "role-a": {
            "indices": [
                {
                    "names": ["*"],
                    "privileges": ["write"]
                }
            ]
        }
    },
    metadata={
        "environment": {
            "level": 2,
            "trusted": True,
            "tags": ["production"]
        }
    },
    expiration="30d",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/80edd2124a822d9f9bf22ecc49d2c2e9.asciidoc
// This file is autogenerated, DO NOT EDIT
// synonyms/apis/get-synonym-rule.asciidoc:72

[source, python]
----
resp = client.synonyms.get_synonym_rule(
    set_id="my-synonyms-set",
    rule_id="test-1",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/812a3d7ab461d74efd9136aaf4bcf11c.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/range-field-note.asciidoc:49

[source, python]
----
resp = client.search(
    index="range_index",
    size="0",
    aggs={
        "range_histo": {
            "histogram": {"field": "expected_attendees", "interval": 5}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/812deb6b7668c7444f3b99d843d2adc1.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/shape-query.asciidoc:132

[source, python]
----
resp = client.indices.create(
    index="shapes",
    mappings={"properties": {"geometry": {"type": "shape"}}},
)
print(resp)

resp1 = client.index(
    index="shapes",
    id="footprint",
    document={
        "geometry": {
            "type": "envelope",
            "coordinates": [[1355, 5355], [1400, 5200]]
        }
    },
)
print(resp1)

resp2 = client.search(
    index="example",
    query={
        "shape": {
            "geometry": {
                "indexed_shape": {
                    "index": "shapes",
                    "id": "footprint",
                    "path": "geometry"
                }
            }
        }
    },
)
print(resp2)
----
python-elasticsearch-9.1.1/docs/examples/8141b60ad245ece2ff5e8d0817400ee5.asciidoc
// This file is autogenerated, DO NOT EDIT
// eql/eql-search-api.asciidoc:684

[source, python]
----
resp = client.eql.search(
    index="my-data-stream",
    query="\n    sequence by process.pid\n      [ file where file.name == \"cmd.exe\" and process.pid != 2013 ]\n      [ process where stringContains(process.executable, \"regsvr32\") ]\n    ",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8141cdaddbe7d794f09f9ee84e46194c.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat/count.asciidoc:73

[source, python]
----
resp = client.cat.count(
    index="my-index-000001",
    v=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/81612c2537386e031b7eb604f6756a71.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/clone-index.asciidoc:123

[source, python]
----
resp = client.indices.clone(
    index="my_source_index",
    target="my_target_index",
    settings={"index.number_of_shards": 5},
    aliases={"my_search_indices": {}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8194f1fae6aa72ab91ea559daad932d4.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-shard-routing.asciidoc:169

[source, python]
----
resp = client.search(
    index="my-index-000001",
    max_concurrent_shard_requests="3",
    query={"match": {"user.id": "kimchy"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/819e00cc6547d925d80090b94e0650d7.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/search-across-clusters.asciidoc:243

[source, python]
----
resp = client.search(
    index="my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001",
    query={"match": {"user.id": "kimchy"}},
    source=["user.id", "message", "http.response.status_code"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/81aad155ff23b1b396833b1182c9d46b.asciidoc
// This file is autogenerated, DO NOT EDIT
// troubleshooting/common-issues/disk-usage-exceeded.asciidoc:35

[source, python]
----
resp = client.cat.shards(
    v=True,
)
print(resp)

resp1 = client.cat.recovery(
    v=True,
    active_only=True,
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/81c7a392efd505b686eed978fb7d9d17.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:636

[source, python]
----
resp = client.indices.create(
    index="english_example",
    settings={
        "analysis": {
            "filter": {
                "english_stop": {"type": "stop", "stopwords": "_english_"},
                "english_keywords": {"type": "keyword_marker", "keywords": ["example"]},
                "english_stemmer": {"type": "stemmer", "language": "english"},
                "english_possessive_stemmer": {
                    "type": "stemmer",
                    "language": "possessive_english"
                }
            },
            "analyzer": {
                "rebuilt_english": {
                    "tokenizer": "standard",
                    "filter": [
                        "english_possessive_stemmer",
                        "lowercase",
                        "english_stop",
                        "english_keywords",
                        "english_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/81ee2ad368208c4c78098292547b0577.asciidoc
// This file is autogenerated, DO NOT EDIT
// security/authorization/mapping-roles.asciidoc:180

[source, python]
----
resp = client.security.put_role_mapping(
    name="admin_user",
    roles=["monitoring"],
    rules={"field": {"dn": "cn=Admin,ou=example,o=com"}},
    enabled=True,
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/81ef5774355180fc44d2a52b5182d24a.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/metrics/string-stats-aggregation.asciidoc:24

[source, python]
----
resp = client.search(
    index="my-index-000001",
    size="0",
    aggs={
        "message_stats": {"string_stats": {"field": "message.keyword"}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/81f1b1e1d5c81683b6bf471c469e6046.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/filter-search-results.asciidoc:81

[source, python]
----
resp = client.search(
    index="shirts",
    query={
        "bool": {
            "filter": [
                {"term": {"color": "red"}},
                {"term": {"brand": "gucci"}}
            ]
        }
    },
    aggs={"models": {"terms": {"field": "model"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8206a7cc615ad93fec322513b8fdd4fd.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/terms-set-query.asciidoc:107

[source, python]
----
resp = client.index(
    index="job-candidates",
    id="2",
    refresh=True,
    document={
        "name": "Jason Response",
        "programming_languages": ["java", "php"],
        "required_matches": 2
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/820f689eaaef15fc07abd1073fa880f8.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/paginate-search-results.asciidoc:11

[source, python]
----
resp = client.search(
    from_=5,
    size=20,
    query={"match": {"user.id": "kimchy"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/821422f8a03dc98d024a15fc737fe9eb.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/trained-models/apis/delete-trained-models-aliases.asciidoc:57

[source, python]
----
resp = client.ml.delete_trained_model_alias(
    model_id="flight-delay-prediction-1574775339910",
    model_alias="flight_delay_model",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/821ac598f5f4a795a13f8dd0c0c4d8d6.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/set-up-tsds.asciidoc:243

[source, python]
----
resp = client.indices.create_data_stream(
    name="metrics-weather_sensors-dev",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/824fded1f9db28906ae7e85ae8de9bd0.asciidoc
// This file is autogenerated, DO NOT EDIT
// ccr/apis/follow/post-resume-follow.asciidoc:90

[source, python]
----
resp = client.ccr.resume_follow(
    index="follower_index",
    max_read_request_operation_count=1024,
    max_outstanding_read_requests=16,
    max_read_request_size="1024k",
    max_write_request_operation_count=32768,
    max_write_request_size="16k",
    max_outstanding_write_requests=8,
    max_write_buffer_count=512,
    max_write_buffer_size="512k",
    max_retry_delay="10s",
    read_poll_timeout="30s",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/827b7e9308ea288f18aea00a5accc38e.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/get-component-template.asciidoc:46

[source, python]
----
resp = client.cluster.get_component_template(
    name="template_1",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/82844ef45e11c0eece100d3109db3182.asciidoc
// This file is autogenerated, DO NOT EDIT
// inference/service-amazon-bedrock.asciidoc:180

[source, python]
----
resp = client.inference.put(
    task_type="completion",
    inference_id="amazon_bedrock_completion",
    inference_config={
        "service": "amazonbedrock",
        "service_settings": {
            "access_key": "",
            "secret_key": "",
            "region": "us-east-1",
            "provider": "amazontitan",
            "model": "amazon.titan-text-premier-v1:0"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/828f0045747fde4888a947bb99e190e3.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/retriever.asciidoc:837

[source, python]
----
resp = client.search(
    index="movies",
    retriever={
        "rule": {
            "match_criteria": {"query_string": "harry potter"},
            "ruleset_ids": ["my-ruleset"],
            "retriever": {
                "standard": {
                    "query": {"query_string": {"query": "harry potter"}}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/829a40d484c778a8c58340c7bf09e1d8.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/filter-search-results.asciidoc:195

[source, python]
----
resp = client.search(
    query={
        "match": {
            "message": {
                "operator": "or",
                "query": "the quick brown"
            }
        }
    },
    rescore={
        "window_size": 50,
        "query": {
            "rescore_query": {
                "match_phrase": {
                    "message": {
                        "query": "the quick brown",
                        "slop": 2
                    }
                }
            },
            "query_weight": 0.7,
            "rescore_query_weight": 1.2
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/82bb6c61dab959f4446dc5ecab7ecbdf.asciidoc
// This file is autogenerated, DO NOT EDIT
// inference/chat-completion-inference.asciidoc:322

[source, python]
----
resp = client.inference.stream_inference(
    task_type="chat_completion",
    inference_id="openai-completion",
    messages=[
        {
            "role": "assistant",
            "content": "Let's find out what the weather is",
            "tool_calls": [
                {
                    "id": "call_KcAjWtAww20AihPHphUh46Gd",
                    "type": "function",
                    "function": {
                        "name": "get_current_weather",
                        "arguments": "{\"location\":\"Boston, MA\"}"
                    }
                }
            ]
        },
        {
            "role": "tool",
            "content": "The weather is cold",
            "tool_call_id": "call_KcAjWtAww20AihPHphUh46Gd"
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/82d6de3081de7b0664f44adf2942675a.asciidoc
// This file is autogenerated, DO NOT EDIT
// behavioral-analytics/apis/list-analytics-collection.asciidoc:91

[source, python]
----
resp = client.search_application.get_behavioral_analytics(
    name="my_analytics_collection",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/82e94b6cdf65e324575f916b3776b779.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/dynamic/templates.asciidoc:538

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "dynamic_templates": [
            {
                "strings_as_keywords": {
                    "match_mapping_type": "string",
                    "runtime": {}
                }
            }
        ]
    },
)
print(resp)
----
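A hand-written follow-up sketch: the dynamic template's effect can be confirmed by indexing a string field and reading back the mapping; the field name and value below are illustrative assumptions.

[source, python]
----
# Hand-written sketch: trigger the dynamic template, then inspect the mapping.
resp = client.index(
    index="my-index-000001",
    id="1",
    document={"english": "Some English text"},  # illustrative field and value
)
print(resp)

resp1 = client.indices.get_mapping(index="my-index-000001")
print(resp1)
----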
python-elasticsearch-9.1.1/docs/examples/83062a543163370328cf2e21a68c1bd3.asciidoc
// This file is autogenerated, DO NOT EDIT
// ilm/actions/ilm-wait-for-snapshot.asciidoc:40

[source, python]
----
resp = client.ilm.put_lifecycle(
    name="my_policy",
    policy={
        "phases": {
            "delete": {
                "actions": {
                    "wait_for_snapshot": {"policy": "slm-policy-name"}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/831f65d700577e11112c711236110f61.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/pattern-analyzer.asciidoc:180

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_email_analyzer": {
                    "type": "pattern",
                    "pattern": "\\W|_",
                    "lowercase": True
                }
            }
        }
    },
)
print(resp)

resp1 = client.indices.analyze(
    index="my-index-000001",
    analyzer="my_email_analyzer",
    text="John_Smith@foo-bar.com",
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/8330b2ea6317769e52d0647ba434b354.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/multi-get.asciidoc:268

[source, python]
----
resp = client.mget(
    routing="key1",
    docs=[
        {"_index": "test", "_id": "1", "routing": "key2"},
        {"_index": "test", "_id": "2"}
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8345d2615f43a934fe1871a5120eca1d.asciidoc
// This file is autogenerated, DO NOT EDIT
// transform/ecommerce-tutorial.asciidoc:77

[source, python]
----
resp = client.transform.preview_transform(
    source={
        "index": "kibana_sample_data_ecommerce",
        "query": {
            "bool": {"filter": {"term": {"currency": "EUR"}}}
        }
    },
    pivot={
        "group_by": {
            "customer_id": {"terms": {"field": "customer_id"}}
        },
        "aggregations": {
            "total_quantity.sum": {"sum": {"field": "total_quantity"}},
            "taxless_total_price.sum": {"sum": {"field": "taxless_total_price"}},
            "total_quantity.max": {"max": {"field": "total_quantity"}},
            "order_id.cardinality": {"cardinality": {"field": "order_id"}}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/834764b2fba6cbb41eaabd740be75656.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc:384

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_custom_analyzer": {
                    "tokenizer": "standard",
                    "filter": [
                        "keyword_repeat",
                        "porter_stem",
                        "remove_duplicates"
                    ]
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8357aa6099089940589ae3e97e7bcffa.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/downsampling-dsl.asciidoc:362

[source, python]
----
resp = client.indices.get_data_stream()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/83780c8f5f17eb21064c1ba6e0a7aa10.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/wrapper-query.asciidoc:10

[source, python]
----
resp = client.search(
    query={
        "wrapper": {
            "query": "eyJ0ZXJtIiA6IHsgInVzZXIuaWQiIDogImtpbWNoeSIgfX0="
        }
    },
)
print(resp)
----
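A hand-written sketch: the base64 payload above decodes to `{"term" : { "user.id" : "kimchy" }}`; in practice the wrapper query is usually built by encoding JSON at runtime, as below with the standard library.

[source, python]
----
import base64
import json

# Hand-written sketch: build the wrapper query payload programmatically.
inner_query = {"term": {"user.id": "kimchy"}}
encoded = base64.b64encode(json.dumps(inner_query).encode("utf-8")).decode("ascii")

resp = client.search(query={"wrapper": {"query": encoded}})
print(resp)
----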
python-elasticsearch-9.1.1/docs/examples/838a4eabebba4c06100fb37dc30c7722.asciidoc
// This file is autogenerated, DO NOT EDIT
// rollup/apis/rollup-search.asciidoc:84

[source, python]
----
resp = client.rollup.put_job(
    id="sensor",
    index_pattern="sensor-*",
    rollup_index="sensor_rollup",
    cron="*/30 * * * * ?",
    page_size=1000,
    groups={
        "date_histogram": {
            "field": "timestamp",
            "fixed_interval": "1h",
            "delay": "7d"
        },
        "terms": {"fields": ["node"]}
    },
    metrics=[
        {"field": "temperature", "metrics": ["min", "max", "sum"]},
        {"field": "voltage", "metrics": ["avg"]}
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/839710129a165cf93c6e329abedf9089.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/update-cross-cluster-api-key.asciidoc:89

[source, python]
----
resp = client.perform_request(
    "POST",
    "/_security/cross_cluster/api_key",
    headers={"Content-Type": "application/json"},
    body={
        "name": "my-cross-cluster-api-key",
        "access": {
            "search": [
                {"names": ["logs*"]}
            ]
        },
        "metadata": {"application": "search"}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/839a4b2930856790e34cc9dfeb983284.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/downsampling.asciidoc:129

[source, python]
----
resp = client.ilm.put_lifecycle(
    name="my_policy",
    policy={
        "phases": {
            "warm": {
                "actions": {
                    "downsample": {"fixed_interval": "1h"}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/83b94f9e7b3a9abca8e165ea56927714.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/rollover-index.asciidoc:386

[source, python]
----
resp = client.indices.create(
    index="",
    aliases={"my-write-alias": {}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/83cd4eb89818b4c32f654d370eafa920.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/keep-types-tokenfilter.asciidoc:41

[source, python]
----
resp = client.indices.analyze(
    tokenizer="standard",
    filter=[
        {"type": "keep_types", "types": [""]}
    ],
    text="1 quick fox 2 lazy dogs",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/83d712b9ffb2e703212b762eba3c521a.asciidoc
// This file is autogenerated, DO NOT EDIT
// troubleshooting/troubleshooting-searches.asciidoc:46

[source, python]
----
resp = client.search(
    index="my-alias",
    ignore_unavailable=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/83d8c920460a12f87b9d5bf65515c367.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/pipeline/movfn-aggregation.asciidoc:342

[source, python]
----
resp = client.search(
    size=0,
    aggs={
        "my_date_histo": {
            "date_histogram": {"field": "date", "calendar_interval": "1M"},
            "aggs": {
                "the_sum": {"sum": {"field": "price"}},
                "the_moving_sum": {
                    "moving_fn": {
                        "buckets_path": "the_sum",
                        "window": 10,
                        "script": "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))"
                    }
                }
            }
        }
    },
)
print(resp)
----
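A hand-written variant for illustration: `moving_fn` ships several built-in helpers besides the standard deviation used above; a plain moving average uses `unweightedAvg`.

[source, python]
----
# Hand-written sketch: simple moving average with a built-in MovingFunctions helper.
resp = client.search(
    size=0,
    aggs={
        "my_date_histo": {
            "date_histogram": {"field": "date", "calendar_interval": "1M"},
            "aggs": {
                "the_sum": {"sum": {"field": "price"}},
                "the_moving_avg": {
                    "moving_fn": {
                        "buckets_path": "the_sum",
                        "window": 10,
                        "script": "MovingFunctions.unweightedAvg(values)"
                    }
                }
            }
        }
    },
)
print(resp)
----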
python-elasticsearch-9.1.1/docs/examples/83dd715e45a5da097123c6d10f22f8f4.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/span-containing-query.asciidoc:10

[source, python]
----
resp = client.search(
    query={
        "span_containing": {
            "little": {"span_term": {"field1": "foo"}},
            "big": {
                "span_near": {
                    "clauses": [
                        {"span_term": {"field1": "bar"}},
                        {"span_term": {"field1": "baz"}}
                    ],
                    "slop": 5,
                    "in_order": True
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/83dfd0852101eca3ba8174c9c38b4e73.asciidoc
// This file is autogenerated, DO NOT EDIT
// monitoring/indices.asciidoc:112

[source, python]
----
resp = client.indices.get_template(
    name=".monitoring-*",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/840b6c5c3d9c56aed854cfab8da04486.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:195

[source, python]
----
resp = client.indices.create(
    index="file-path-test",
    settings={
        "analysis": {
            "analyzer": {
                "custom_path_tree": {"tokenizer": "custom_hierarchy"},
                "custom_path_tree_reversed": {"tokenizer": "custom_hierarchy_reversed"}
            },
            "tokenizer": {
                "custom_hierarchy": {
                    "type": "path_hierarchy",
                    "delimiter": "/"
                },
                "custom_hierarchy_reversed": {
                    "type": "path_hierarchy",
                    "delimiter": "/",
                    "reverse": "true"
                }
            }
        }
    },
    mappings={
        "properties": {
            "file_path": {
                "type": "text",
                "fields": {
                    "tree": {"type": "text", "analyzer": "custom_path_tree"},
                    "tree_reversed": {
                        "type": "text",
                        "analyzer": "custom_path_tree_reversed"
                    }
                }
            }
        }
    },
)
print(resp)

resp1 = client.index(
    index="file-path-test",
    id="1",
    document={"file_path": "/User/alice/photos/2017/05/16/my_photo1.jpg"},
)
print(resp1)

resp2 = client.index(
    index="file-path-test",
    id="2",
    document={"file_path": "/User/alice/photos/2017/05/16/my_photo2.jpg"},
)
print(resp2)

resp3 = client.index(
    index="file-path-test",
    id="3",
    document={"file_path": "/User/alice/photos/2017/05/16/my_photo3.jpg"},
)
print(resp3)

resp4 = client.index(
    index="file-path-test",
    id="4",
    document={"file_path": "/User/alice/photos/2017/05/15/my_photo1.jpg"},
)
print(resp4)

resp5 = client.index(
    index="file-path-test",
    id="5",
    document={"file_path": "/User/bob/photos/2017/05/16/my_photo1.jpg"},
)
print(resp5)
----

python-elasticsearch-9.1.1/docs/examples/84108653e9e03b4edacd878ec870df77.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/analyzers/lang-analyzer.asciidoc:1043

[source, python]
----
resp = client.indices.create(
    index="hungarian_example",
    settings={
        "analysis": {
            "filter": {
                "hungarian_stop": {"type": "stop", "stopwords": "_hungarian_"},
                "hungarian_keywords": {"type": "keyword_marker", "keywords": ["példa"]},
                "hungarian_stemmer": {"type": "stemmer", "language": "hungarian"}
            },
            "analyzer": {
                "rebuilt_hungarian": {
                    "tokenizer": "standard",
                    "filter": [
                        "lowercase",
                        "hungarian_stop",
                        "hungarian_keywords",
                        "hungarian_stemmer"
                    ]
                }
            }
        }
    },
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/8417d8d35ec5fc5665dfb2f95d6d1101.asciidoc
// This file is autogenerated, DO NOT EDIT
// watcher/getting-started.asciidoc:131

[source, python]
----
resp = client.search(
    index=".watcher-history*",
    pretty=True,
    query={
        "bool": {
            "must": [
                {"match": {"result.condition.met": True}},
                {"range": {"result.execution_time": {"gte": "now-10s"}}}
            ]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/841ad0a70f4271f61f0bac0b467b59c5.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/multi-termvectors.asciidoc:97

[source, python]
----
resp = client.mtermvectors(
    index="my-index-000001",
    docs=[
        {
            "_id": "2",
            "fields": ["message"],
            "term_statistics": True
        },
        {"_id": "1"}
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/841d8b766902c8e3ae85c228a31383ac.asciidoc
// This file is autogenerated, DO NOT EDIT
// sql/apis/get-async-sql-search-status-api.asciidoc:18

[source, python]
----
resp = client.sql.get_async_status(
    id="FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=",
    format="json",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/84237aa9da49ab4b4c4e2b21d2548df2.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/apis/verify-repo-integrity-api.asciidoc:31

[source, python]
----
resp = client.snapshot.repository_verify_integrity(
    name="my_repository",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/84243213614fe64930b1d430704afb29.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:1014

[source, python]
----
resp = client.indices.put_mapping(
    index="my-index-000001",
    runtime={
        "voltage_corrected": {
            "type": "double",
            "script": {
                "source": "\n      emit(doc['voltage'].value * params['multiplier'])\n    ",
                "params": {"multiplier": 2}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/84465de841fe5c6099a0382f786f2cb8.asciidoc
// This file is autogenerated, DO NOT EDIT
// alias.asciidoc:76

[source, python]
----
resp = client.indices.update_aliases(
    actions=[
        {
            "remove": {
                "index": "logs-nginx.access-prod",
                "alias": "logs"
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8478c39c71bbb559ef6ab919f918f22b.asciidoc
// This file is autogenerated, DO NOT EDIT
// eql/eql.asciidoc:1223

[source, python]
----
resp = client.eql.search(
    index="my-data-stream",
    filter={
        "range": {
            "@timestamp": {
                "gte": "now-1d/d",
                "lt": "now/d"
            }
        }
    },
    query="\n    file where (file.type == \"file\" and file.name == \"cmd.exe\")\n    ",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/8494d09c39e109a012094eb9d6ec52ac.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/processors/pipeline.asciidoc:36

[source, python]
----
resp = client.ingest.put_pipeline(
    id="pipelineA",
    description="inner pipeline",
    processors=[
        {
            "set": {
                "field": "inner_pipeline_set",
                "value": "inner"
            }
        }
    ],
)
print(resp)
----
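A hand-written sketch of how such an inner pipeline is typically consumed: an outer pipeline delegates to it through the `pipeline` processor. The outer pipeline id and its extra `set` processor are illustrative assumptions.

[source, python]
----
# Hand-written sketch: an outer pipeline that invokes pipelineA.
resp = client.ingest.put_pipeline(
    id="pipelineB",  # illustrative id
    description="outer pipeline",
    processors=[
        {"pipeline": {"name": "pipelineA"}},
        {"set": {"field": "outer_pipeline_set", "value": "outer"}}
    ],
)
print(resp)
----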
python] ---- resp = client.searchable_snapshots.stats( index="my-index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/84c69fb07050f0e89720007a6507a221.asciidoc000066400000000000000000000003301506101734100261010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/high-cpu-usage.asciidoc:118 [source, python] ---- resp = client.tasks.cancel( task_id="oTUltX4IQMOUUVeiohTt8A:464", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/84e2cf7417c9e0c9e6f3c23031001440.asciidoc000066400000000000000000000002401506101734100262370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/enrich/enrich-stats.asciidoc:135 [source, python] ---- resp = client.enrich.stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/84edb44c5b74426f448b2baa101092d6.asciidoc000066400000000000000000000004441506101734100264050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:75 [source, python] ---- resp = client.search( index="range_index", query={ "term": { "expected_attendees": { "value": 12 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/84ef9fe951c6d3caa7438238a5b23319.asciidoc000066400000000000000000000004341506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:487 [source, python] ---- resp = client.search( index="cooking_blog", query={ "term": { "author.keyword": "Maria Rodriguez" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/84f2f0cea90340bdd041421afdb58ec3.asciidoc000066400000000000000000000010351506101734100266130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:7 [source, python] ---- resp = client.indices.create( index="index1", mappings={ "properties": { "comment": { "type": "text", "analyzer": "standard", "fields": { "english": { "type": "text", "analyzer": "english" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/84f3e8524f6ff80e870c03ab71551538.asciidoc000066400000000000000000000004751506101734100263010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-shard-routing.asciidoc:79 [source, python] ---- resp = client.search( index="my-index-000001", preference="my-custom-shard-string", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/850bfd0a00d32475a54ac7f87fb4cc4d.asciidoc000066400000000000000000000012061506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:563 [source, python] ---- resp = client.search( index="my-index-000001", runtime_mappings={ "measures.voltage": { "type": "double", "script": { "source": "if (doc['model_number.keyword'].value.equals('HG537PU'))\n {emit(1.7 * params._source['measures']['voltage']);}\n else{emit(params._source['measures']['voltage']);}" } } }, query={ "match": { "model_number": "HG537PU" } }, fields=[ "measures.voltage" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/851f9754dbefc099c54c5423ca4565c0.asciidoc000066400000000000000000000006221506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/ipprefix-aggregation.asciidoc:107 [source, python] ---- resp = client.search( index="network-traffic", size=0, aggs={ "ipv6-subnets": { "ip_prefix": { 
"field": "ipv6", "prefix_length": 64, "is_ipv6": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/852b394d78b8c79ee0055b5501981a4b.asciidoc000066400000000000000000000012261506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:607 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "product_name": { "terms": { "field": "product", "missing_bucket": True, "missing_order": "last" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/853fc710cea79fb4e1a85fb6d149f9c5.asciidoc000066400000000000000000000023351506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:876 [source, python] ---- resp = client.search( index="movies", retriever={ "rule": { "match_criteria": { "query_string": "harry potter" }, "ruleset_ids": [ "my-ruleset" ], "retriever": { "rrf": { "retrievers": [ { "standard": { "query": { "query_string": { "query": "sorcerer's stone" } } } }, { "standard": { "query": { "query_string": { "query": "chamber of secrets" } } } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85479e02af00681210e17e3d0ff51e21.asciidoc000066400000000000000000000005701506101734100262440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/date.asciidoc:93 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "date": { "type": "date", "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85519a614ae18c998986d46bbad82b76.asciidoc000066400000000000000000000007321506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:100 [source, python] ---- resp = client.indices.put_index_template( name="my_template", index_patterns=[ "test-*" ], template={ "settings": { "number_of_shards": 1, "number_of_replicas": 1, "index.lifecycle.name": "my_policy", "index.lifecycle.rollover_alias": "test-alias" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8566f5ecf4ae14802ba63c8cc7c629f8.asciidoc000066400000000000000000000006341506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:216 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="mistral_embeddings", inference_config={ "service": "mistral", "service_settings": { "api_key": "", "model": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/856c10ad554c26b70f1121454caff40a.asciidoc000066400000000000000000000005461506101734100263760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:250 [source, python] ---- resp = client.search( index="byte-image-index", knn={ "field": "byte-image-vector", "query_vector": "fb09", "k": 10, "num_candidates": 100 }, fields=[ "title" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8582e918a6275472d2eba2e95f1dbe77.asciidoc000066400000000000000000000017421506101734100264450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/disk-usage-exceeded.asciidoc:65 [source, python] ---- resp = client.cluster.put_settings( persistent={ 
"cluster.routing.allocation.disk.watermark.low": "90%", "cluster.routing.allocation.disk.watermark.low.max_headroom": "100GB", "cluster.routing.allocation.disk.watermark.high": "95%", "cluster.routing.allocation.disk.watermark.high.max_headroom": "20GB", "cluster.routing.allocation.disk.watermark.flood_stage": "97%", "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": "5GB", "cluster.routing.allocation.disk.watermark.flood_stage.frozen": "97%", "cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom": "5GB" }, ) print(resp) resp1 = client.indices.put_settings( index="*", expand_wildcards="all", settings={ "index.blocks.read_only_allow_delete": None }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/858fde15fb0a0340873b123043f8c3b4.asciidoc000066400000000000000000000020261506101734100263230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/histogram.asciidoc:118 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "my_text": "histogram_1", "my_histogram": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", document={ "my_text": "histogram_2", "my_histogram": { "values": [ 0.1, 0.25, 0.35, 0.4, 0.45, 0.5 ], "counts": [ 8, 17, 8, 7, 6, 2 ] } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/85ae90b63ecba9d2bad16144b054c0a1.asciidoc000066400000000000000000000007171506101734100266130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:535 [source, python] ---- resp = client.sql.query( format="txt", runtime_mappings={ "release_day_of_week": { "type": "keyword", "script": "\n emit(doc['release_date'].value.dayOfWeekEnum.toString())\n " } }, query="\n SELECT * FROM library WHERE page_count > 300 AND author = 'Frank Herbert'\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85d2e33791f1a74a69dfb04a60e69306.asciidoc000066400000000000000000000025561506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/actions.asciidoc:57 [source, python] ---- resp = client.watcher.put_watch( id="error_logs_alert", metadata={ "color": "red" }, trigger={ "schedule": { "interval": "5m" } }, input={ "search": { "request": { "indices": "log-events", "body": { "size": 0, "query": { "match": { "status": "error" } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 5 } } }, actions={ "email_administrator": { "throttle_period": "15m", "email": { "to": "sys.admino@host.domain", "subject": "Encountered {{ctx.payload.hits.total}} errors", "body": "Too many error in the system, see attached data", "attachments": { "attached_data": { "data": { "format": "json" } } }, "priority": "high" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85e2719d9fd6d2c2d47d28d39f2e3f7e.asciidoc000066400000000000000000000002701506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/feature-migration.asciidoc:53 [source, python] ---- resp = client.migration.get_feature_upgrade_status() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85f0e5e8ab91ceab63c21dbedd9f4037.asciidoc000066400000000000000000000021241506101734100267760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:737 [source, python] ---- resp = client.indices.create( index="finnish_example", 
settings={ "analysis": { "filter": { "finnish_stop": { "type": "stop", "stopwords": "_finnish_" }, "finnish_keywords": { "type": "keyword_marker", "keywords": [ "esimerkki" ] }, "finnish_stemmer": { "type": "stemmer", "language": "finnish" } }, "analyzer": { "rebuilt_finnish": { "tokenizer": "standard", "filter": [ "lowercase", "finnish_stop", "finnish_keywords", "finnish_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85f2839beeb71edb66988e5c82188be0.asciidoc000066400000000000000000000010011506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/update-license.asciidoc:69 [source, python] ---- resp = client.license.post( licenses=[ { "uid": "893361dc-9749-4997-93cb-802e3d7fa4xx", "type": "basic", "issue_date_in_millis": 1411948800000, "expiry_date_in_millis": 1914278399999, "max_nodes": 1, "issued_to": "issuedTo", "issuer": "issuer", "signature": "xx" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85f6667f148d16d075493fddf07e2932.asciidoc000066400000000000000000000005041506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:616 [source, python] ---- resp = client.reindex( source={ "index": ".ds-my-data-stream-2099.03.07-000001" }, dest={ "index": "new-data-stream", "op_type": "create" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/85f9fc6f98e8573efed9b034e853d5ae.asciidoc000066400000000000000000000006101506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elasticsearch.asciidoc:289 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="use_existing_deployment", inference_config={ "service": "elasticsearch", "service_settings": { "deployment_id": ".elser_model_2" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8619bd17bbfe33490b1f277007f654db.asciidoc000066400000000000000000000007561506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-cohere.asciidoc:214 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="cohere-rerank", inference_config={ "service": "cohere", "service_settings": { "api_key": "", "model_id": "rerank-english-v3.0" }, "task_settings": { "top_n": 10, "return_documents": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/861f5f61409dc87f3671293b87839ff7.asciidoc000066400000000000000000000002621506101734100262450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/stats.asciidoc:1542 [source, python] ---- resp = client.cluster.stats( human=True, pretty=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8621c05cc7cf3880bde751f6670a0c3a.asciidoc000066400000000000000000000004551506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:439 [source, python] ---- resp = client.indices.put_settings( index=".reindexed-v9-ml-anomalies-custom-example", settings={ "index": { "number_of_replicas": 0 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/86280dcb49aa89083be4b2644daf1b7c.asciidoc000066400000000000000000000003051506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-job.asciidoc:240 [source, python] ---- resp = client.ml.get_jobs( job_id="high_sum_total_sales", ) 
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/862907653d1c18d2e80eff7f421200e2.asciidoc000066400000000000000000000005301506101734100262540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:677 [source, python] ---- resp = client.security.put_role_mapping( name="saml-example", roles=[ "example_role" ], enabled=True, rules={ "field": { "realm.name": "saml1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/863253bf0ab7d227ff72a0a384f4de8c.asciidoc000066400000000000000000000003731506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:673 [source, python] ---- resp = client.cluster.put_settings( persistent={ "indices.lifecycle.poll_interval": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8634c9993485d622fb12d24f4f242264.asciidoc000066400000000000000000000011001506101734100261240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:433 [source, python] ---- resp = client.indices.modify_data_stream( actions=[ { "remove_backing_index": { "data_stream": "my-data-stream", "index": ".ds-my-data-stream-2023.07.26-000001" } }, { "add_backing_index": { "data_stream": "my-data-stream", "index": ".ds-my-data-stream-2023.07.26-000001-downsample" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/867f7d43a78066731ead2e223960fc07.asciidoc000066400000000000000000000003651506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:408 [source, python] ---- resp = client.cluster.put_settings( persistent={ "action.destructive_requires_name": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8684589e31d96ab229e8c4feb4d704bb.asciidoc000066400000000000000000000003161506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/enrich/get-enrich-policy.asciidoc:130 [source, python] ---- resp = client.enrich.get_policy( name="my-policy,other-policy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/86926bcebf213ac182d4373027554858.asciidoc000066400000000000000000000004761506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/unsigned_long.asciidoc:13 [source, python] ---- resp = client.indices.create( index="my_index", mappings={ "properties": { "my_counter": { "type": "unsigned_long" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8696ba08ca6cc4992110c331732e5f47.asciidoc000066400000000000000000000005431506101734100262630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/boxplot-aggregation.asciidoc:205 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "grade_boxplot": { "boxplot": { "field": "grade", "missing": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8699d35269a47ba867fa8cc766287413.asciidoc000066400000000000000000000002411506101734100262350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/start-basic.asciidoc:48 [source, python] ---- resp = client.license.post_start_basic() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/86c5594c4ec551391096c1abcd652b50.asciidoc000066400000000000000000000006051506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO 
NOT EDIT // mapping/types/unsigned_long.asciidoc:125 [source, python] ---- resp = client.search( index="my_index", query={ "match_all": {} }, script_fields={ "count10": { "script": { "source": "Long.divideUnsigned(doc['my_counter'].value, 10)" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8703f3b1b3895543abc36e2a7a0013d3.asciidoc000066400000000000000000000007601506101734100263200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/prioritization.asciidoc:17 [source, python] ---- resp = client.indices.create( index="index_1", ) print(resp) resp1 = client.indices.create( index="index_2", ) print(resp1) resp2 = client.indices.create( index="index_3", settings={ "index.priority": 10 }, ) print(resp2) resp3 = client.indices.create( index="index_4", settings={ "index.priority": 5 }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/871154d08efd7251cf3272e758f06acf.asciidoc000066400000000000000000000014151506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/common-grams-tokenfilter.asciidoc:126 [source, python] ---- resp = client.indices.create( index="common_grams_example", settings={ "analysis": { "analyzer": { "index_grams": { "tokenizer": "whitespace", "filter": [ "common_grams" ] } }, "filter": { "common_grams": { "type": "common_grams", "common_words": [ "a", "is", "the" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8731188553e14134b0a533010318f91a.asciidoc000066400000000000000000000007131506101734100257360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:70 [source, python] ---- resp = client.search( query={ "terms": { "force": [ "British Transport Police" ] } }, aggregations={ "significant_crime_types": { "significant_terms": { "field": "crime_type" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8739fad1fb2323950b673acf0c9f2ff5.asciidoc000066400000000000000000000002611506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/open-close.asciidoc:126 [source, python] ---- resp = client.indices.open( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/873e2333734b1cf5ed066596e5f74b0a.asciidoc000066400000000000000000000037421506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geocentroid-aggregation.asciidoc:13 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": 1 } }, { "location": "POINT (4.912350 52.374081)", "city": "Amsterdam", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (4.901618 52.369219)", "city": "Amsterdam", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 } }, { "location": "POINT (4.914722 52.371667)", "city": "Amsterdam", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (4.405200 51.222900)", "city": "Antwerp", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (2.336389 48.861111)", "city": "Paris", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (2.327000 48.860000)", "city": "Paris", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( 
index="museums", size="0", aggs={ "centroid": { "geo_centroid": { "field": "location" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/873fbbc6ab81409058591385fd602736.asciidoc000066400000000000000000000017451506101734100262210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/nested-query.asciidoc:171 [source, python] ---- resp = client.index( index="drivers", id="1", document={ "driver": { "last_name": "McQueen", "vehicle": [ { "make": "Powell Motors", "model": "Canyonero" }, { "make": "Miller-Meteor", "model": "Ecto-1" } ] } }, ) print(resp) resp1 = client.index( index="drivers", id="2", refresh=True, document={ "driver": { "last_name": "Hudson", "vehicle": [ { "make": "Mifune", "model": "Mach Five" }, { "make": "Miller-Meteor", "model": "Ecto-1" } ] } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/87416e6a1ca2da324dbed6deb05303eb.asciidoc000066400000000000000000000007361506101734100267010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/count.asciidoc:112 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "user.id": "kimchy" }, ) print(resp) resp1 = client.count( index="my-index-000001", q="user:kimchy", ) print(resp1) resp2 = client.count( index="my-index-000001", query={ "term": { "user.id": "kimchy" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/8743887d9b89ea1a2d5e780c349972cf.asciidoc000066400000000000000000000007771506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/collapse-search-results.asciidoc:263 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": "GET /search" } }, collapse={ "field": "geo.country_name", "inner_hits": { "name": "by_location", "collapse": { "field": "user.id" }, "size": 3 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87457bb3467484bec3e9df4e25942ba6.asciidoc000066400000000000000000000003351506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/multivalued-fields.asciidoc:275 [source, python] ---- resp = client.esql.query( query="FROM mv | EVAL b=MV_MIN(b) | EVAL b + 2, a + b | LIMIT 4", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87469f8b7e9b965408479d276c3ce8aa.asciidoc000066400000000000000000000003441506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // behavioral-analytics/apis/list-analytics-collection.asciidoc:111 [source, python] ---- resp = client.search_application.get_behavioral_analytics( name="my*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87733deeea4b441b595d19a0f97346f0.asciidoc000066400000000000000000000002631506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // health/health.asciidoc:479 [source, python] ---- resp = client.health_report( feature="shards_availability", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/877ea90c663b5df9efe95717646a666f.asciidoc000066400000000000000000000021451506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:159 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "group": { "type": "keyword" }, "user": { "type": "nested", "properties": { "first": { "type": "keyword" }, "last": { "type": "keyword" } } } 
} }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", refresh=True, document={ "group": "fans", "user": [ { "first": "John", "last": "Smith" }, { "first": "Alice", "last": "White" } ] }, ) print(resp1) resp2 = client.search( index="my-index-000001", fields=[ "*" ], source=False, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/87846c3ddacab1da4af626ae8099e4be.asciidoc000066400000000000000000000005371506101734100270100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/mapping-roles.asciidoc:190 [source, python] ---- resp = client.security.put_role_mapping( name="basic_user", roles=[ "user" ], rules={ "field": { "dn": "cn=John Doe,ou=example,o=com" } }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87b0b496747ad6c1e4ab4b462128fa1c.asciidoc000066400000000000000000000002711506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/nodeattrs.asciidoc:119 [source, python] ---- resp = client.cat.nodeattrs( v=True, h="name,pid,attr,value", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87c3e9963400a3e4b296ef8d1c86fae3.asciidoc000066400000000000000000000003321506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-roles-cache.asciidoc:55 [source, python] ---- resp = client.security.clear_cached_roles( name="my_admin_role,my_test_role", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87c42ef733a50954e4d757fc0a08decc.asciidoc000066400000000000000000000004021506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:261 [source, python] ---- resp = client.security.create_api_key( name="my-api-key-1", metadata={ "application": "my-application" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87d970b4944b6d742c484d7184996c8a.asciidoc000066400000000000000000000004741506101734100262450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:708 [source, python] ---- resp = client.search_application.search( name="my_search_application", params={ "query_string": "Where is the best place for mountain climbing?" 
}, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/87f854393d715aabf4d45e90a8eb74ce.asciidoc000066400000000000000000000006141506101734100266010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/median-absolute-deviation-aggregation.asciidoc:173 [source, python] ---- resp = client.search( index="reviews", size=0, aggs={ "review_variability": { "median_absolute_deviation": { "field": "rating", "missing": 5 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88195d87a350e7fff200131f410c3e88.asciidoc000066400000000000000000000012201506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/range-aggregation.asciidoc:70 [source, python] ---- resp = client.search( index="sales", aggs={ "price_ranges": { "range": { "field": "price", "keyed": True, "ranges": [ { "to": 100 }, { "from": 100, "to": 200 }, { "from": 200 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88341b4eba71ec722f3e38fa1696fe87.asciidoc000066400000000000000000000025341506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:40 [source, python] ---- resp = client.transform.preview_transform( source={ "index": "kibana_sample_data_ecommerce" }, dest={ "index": "sample_ecommerce_orders_by_customer" }, pivot={ "group_by": { "user": { "terms": { "field": "user" } }, "customer_id": { "terms": { "field": "customer_id" } } }, "aggregations": { "order_count": { "value_count": { "field": "order_id" } }, "total_order_amt": { "sum": { "field": "taxful_total_price" } }, "avg_amt_per_order": { "avg": { "field": "taxful_total_price" } }, "avg_unique_products_per_order": { "avg": { "field": "total_unique_products" } }, "total_unique_products": { "cardinality": { "field": "products.product_id" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88554b79dba8fd79991855a692b69ff9.asciidoc000066400000000000000000000023041506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // graph/explore.asciidoc:315 [source, python] ---- resp = client.graph.explore( index="clicklogs", query={ "match": { "query.raw": "midi" } }, controls={ "use_significance": False, "sample_size": 2000, "timeout": 2000, "sample_diversity": { "field": "category.raw", "max_docs_per_value": 500 } }, vertices=[ { "field": "product", "size": 5, "min_doc_count": 10, "shard_min_doc_count": 3 } ], connections={ "query": { "bool": { "filter": [ { "range": { "query_time": { "gte": "2015-10-01 00:00:00" } } } ] } }, "vertices": [ { "field": "query.raw", "size": 5, "min_doc_count": 10, "shard_min_doc_count": 3 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88a08d0b15ef41324f5c23db533d47d1.asciidoc000066400000000000000000000004111506101734100264000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/standard-tokenizer.asciidoc:16 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88a283dfccc481f1afba79d9b3c61f51.asciidoc000066400000000000000000000003121506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-user.asciidoc:117 [source, python] ---- resp = client.perform_request( "GET", "/_security/_query/user", ) print(resp) ---- 
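When the Query User call above needs filtering or pagination, the same endpoint accepts a request body. The following is a minimal sketch only — the endpoint path matches the generated example, but the username, paging values, and connection details are illustrative placeholders, not part of the generated examples:

[source, python]
----
from elasticsearch import Elasticsearch

# Placeholder connection details; adjust for your deployment.
client = Elasticsearch("http://localhost:9200")

# Filter and paginate the Query User API through the generic
# perform_request escape hatch, mirroring the example above.
resp = client.perform_request(
    "POST",
    "/_security/_query/user",
    headers={"Content-Type": "application/json"},
    body={
        "query": {"match": {"username": "jacknich"}},  # hypothetical username
        "from": 0,
        "size": 10,
    },
)
print(resp)
----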
python-elasticsearch-9.1.1/docs/examples/88b19973b970adf9b73fca82017d4951.asciidoc000066400000000000000000000004221506101734100263600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-multiple-indices.asciidoc:36 [source, python] ---- resp = client.search( index="my-index-*", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88cecae3f0363fc186d955dd8616b5d4.asciidoc000066400000000000000000000004031506101734100265740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/get-async-eql-status-api.asciidoc:90 [source, python] ---- resp = client.eql.get_status( id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", keep_alive="5d", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/88cf60d3310a56d8ae12704abc05b565.asciidoc000066400000000000000000000002461506101734100264030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/get-trial-status.asciidoc:46 [source, python] ---- resp = client.license.get_trial_status() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/894fce12d8f0d01e4c4083885a0c0077.asciidoc000066400000000000000000000005671506101734100263450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:183 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "mistral-embeddings", "pipeline": "mistral_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8963fb1e3d0900ba3b68be212e8972ee.asciidoc000066400000000000000000000013121506101734100264760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/position-increment-gap.asciidoc:53 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "names": { "type": "text", "position_increment_gap": 0 } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "names": [ "John Abraham", "Lincoln Smith" ] }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "match_phrase": { "names": "Abraham Lincoln" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/897668edcbb0785fa5229aeb2dfc963e.asciidoc000066400000000000000000000005171506101734100266700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:51 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "query": { "match": { "message": "bonsai tree" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/89a6b24618cafd60de1702a5b9f28a8d.asciidoc000066400000000000000000000020321506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/phrase-suggest.asciidoc:221 [source, python] ---- resp = client.search( index="test", suggest={ "text": "noble prize", "simple_phrase": { "phrase": { "field": "title.trigram", "size": 1, "direct_generator": [ { "field": "title.trigram", "suggest_mode": "always", "min_word_length": 1 } ], "collate": { "query": { "source": { "match": { "{{field_name}}": "{{suggestion}}" } } }, "params": { "field_name": "title" }, "prune": True } } } }, ) print(resp) ---- 
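A short sketch of consuming the suggester response above. The `suggest` -> `simple_phrase` -> `options` layout is the standard suggester response shape, and `collate_match` appears on each option because the request sets `"prune": True`; the loop itself is illustrative:

[source, python]
----
# Walk the response from the "simple_phrase" suggester defined above.
for entry in resp["suggest"]["simple_phrase"]:
    for option in entry["options"]:
        # "collate_match" reports whether the collate query matched,
        # since pruning keeps non-matching suggestions in the response.
        print(option["text"], option["score"], option.get("collate_match"))
----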
python-elasticsearch-9.1.1/docs/examples/89aed93f641a5e243bdc3ee5cdc2acc6.asciidoc000066400000000000000000000063371506101734100270670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:484 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "index1", "index2" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"bool\": {\n \"should\": [\n {{#text}}\n {\n \"multi_match\": {\n \"query\": \"{{query_string}}\",\n \"fields\": [{{#text_fields}}\"{{name}}^{{boost}}\",{{/text_fields}}],\n \"boost\": \"{{text_query_boost}}\"\n }\n },\n {{/text}}\n {{#elser}}\n {{#elser_fields}}\n {\n \"sparse_vector\": {\n \"field\": \"ml.inference.{{.}}_expanded.predicted_value\",\n \"inference_id\": \"\",\n \"query\": \"{{query_string}}\"\n }\n },\n {{/elser_fields}}\n { \"bool\": { \"must\": [] } },\n {{/elser}}\n {{^text}}\n {{^elser}}\n {\n \"query_string\": {\n \"query\": \"{{query_string}}\",\n \"default_field\": \"{{default_field}}\",\n \"default_operator\": \"{{default_operator}}\",\n \"boost\": \"{{text_query_boost}}\"\n }\n },\n {{/elser}}\n {{/text}}\n { \"bool\": { \"must\": [] } }\n ],\n \"minimum_should_match\": 1\n }\n },\n \"min_score\": \"{{min_score}}\",\n \"explain\": \"{{explain}}\",\n \"from\": \"{{from}}\",\n \"size\": \"{{size}}\"\n }\n ", "params": { "text": False, "elser": False, "elser_fields": [ { "name": "title", "boost": 1 }, { "name": "description", "boost": 1 } ], "text_fields": [ { "name": "title", "boost": 10 }, { "name": "description", "boost": 5 }, { "name": "state", "boost": 1 } ], "query_string": "*", "text_query_boost": 4, "default_field": "*", "default_operator": "OR", "explain": False, "from": 0, "size": 10, "min_score": 0 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/89b72dd7f747f6297c2b089e8bc807be.asciidoc000066400000000000000000000005061506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/put-repo-api.asciidoc:16 [source, python] ---- resp = client.snapshot.create_repository( name="my_repository", repository={ "type": "fs", "settings": { "location": "my_backup_location" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/89c57917bc7bd2e6387b5eb54ece37b1.asciidoc000066400000000000000000000004211506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:174 [source, python] ---- resp = client.count( index="my-index-000001", query={ "exists": { "field": "my-field" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/89d2a3748dc14c6d5d4c6f94b9b03938.asciidoc000066400000000000000000000003111506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/split-index.asciidoc:50 [source, python] ---- resp = client.indices.add_block( index="my_source_index", block="write", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/89dee10a24ea2727af5b00039a4271bd.asciidoc000066400000000000000000000100661506101734100264570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geoline-aggregation.asciidoc:159 [source, python] ---- resp = client.indices.create( index="tour", mappings={ "properties": { "city": { "type": "keyword", "time_series_dimension": True }, "category": { "type": "keyword" }, "route": { "type": "long" }, "name": { 
"type": "keyword" }, "location": { "type": "geo_point" }, "@timestamp": { "type": "date" } } }, settings={ "index": { "mode": "time_series", "routing_path": [ "city" ], "time_series": { "start_time": "2023-01-01T00:00:00Z", "end_time": "2024-01-01T00:00:00Z" } } }, ) print(resp) resp1 = client.bulk( index="tour", refresh=True, operations=[ { "index": {} }, { "@timestamp": "2023-01-02T09:00:00Z", "route": 0, "location": "POINT(4.889187 52.373184)", "city": "Amsterdam", "category": "Attraction", "name": "Royal Palace Amsterdam" }, { "index": {} }, { "@timestamp": "2023-01-02T10:00:00Z", "route": 1, "location": "POINT(4.885057 52.370159)", "city": "Amsterdam", "category": "Attraction", "name": "The Amsterdam Dungeon" }, { "index": {} }, { "@timestamp": "2023-01-02T13:00:00Z", "route": 2, "location": "POINT(4.901618 52.369219)", "city": "Amsterdam", "category": "Museum", "name": "Museum Het Rembrandthuis" }, { "index": {} }, { "@timestamp": "2023-01-02T16:00:00Z", "route": 3, "location": "POINT(4.912350 52.374081)", "city": "Amsterdam", "category": "Museum", "name": "NEMO Science Museum" }, { "index": {} }, { "@timestamp": "2023-01-03T12:00:00Z", "route": 4, "location": "POINT(4.914722 52.371667)", "city": "Amsterdam", "category": "Museum", "name": "Nederlands Scheepvaartmuseum" }, { "index": {} }, { "@timestamp": "2023-01-04T09:00:00Z", "route": 5, "location": "POINT(4.401384 51.220292)", "city": "Antwerp", "category": "Attraction", "name": "Cathedral of Our Lady" }, { "index": {} }, { "@timestamp": "2023-01-04T12:00:00Z", "route": 6, "location": "POINT(4.405819 51.221758)", "city": "Antwerp", "category": "Museum", "name": "Snijders&Rockoxhuis" }, { "index": {} }, { "@timestamp": "2023-01-04T15:00:00Z", "route": 7, "location": "POINT(4.405200 51.222900)", "city": "Antwerp", "category": "Museum", "name": "Letterenhuis" }, { "index": {} }, { "@timestamp": "2023-01-05T10:00:00Z", "route": 8, "location": "POINT(2.336389 48.861111)", "city": "Paris", "category": "Museum", "name": "Musée du Louvre" }, { "index": {} }, { "@timestamp": "2023-01-05T14:00:00Z", "route": 9, "location": "POINT(2.327000 48.860000)", "city": "Paris", "category": "Museum", "name": "Musée dOrsay" } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/89f547649895176c246bb8c41313ff21.asciidoc000066400000000000000000000005751506101734100261540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-syntax.asciidoc:202 [source, python] ---- resp = client.esql.query( query="\nFROM library\n| EVAL year = DATE_EXTRACT(\"year\", release_date)\n| WHERE page_count > ? 
AND match(author, ?, {\"minimum_should_match\": ?})\n| LIMIT 5\n", params=[ 300, "Frank Herbert", 2 ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/89f8eac24f3ec6a7668d580aaf0eeefa.asciidoc000066400000000000000000000004551506101734100271020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:292 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "snowball" ], text="detailed output", explain=True, attributes=[ "keyword" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8a0b5f759de3f27f0801c1176e616117.asciidoc000066400000000000000000000005401506101734100262570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-semantic-text.asciidoc:36 [source, python] ---- resp = client.indices.create( index="semantic-embeddings", mappings={ "properties": { "content": { "type": "semantic_text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8a12cd824404d74f098d854716a26899.asciidoc000066400000000000000000000003331506101734100261420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-datafeed.asciidoc:49 [source, python] ---- resp = client.ml.delete_datafeed( datafeed_id="datafeed-total-requests", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8a1b6eae4893c5dd27b3d81fd8d70f5b.asciidoc000066400000000000000000000002551506101734100267320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:467 [source, python] ---- resp = client.tasks.get( task_id="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8a1f6cffa653800282c0ae160ee375bc.asciidoc000066400000000000000000000006171506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:161 [source, python] ---- resp = client.update( index="test", id="1", script={ "source": "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }", "lang": "painless", "params": { "tag": "blue" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8a4941cae0b32d68b22bec2d12c82860.asciidoc000066400000000000000000000005341506101734100264610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:356 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n sequence by process.pid with maxspan=1h\n [ process where process.name == \"regsvr32.exe\" ]\n [ file where stringContains(file.name, \"scrobj.dll\") ]\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8a617dbfe5887f8ecc8815de132b6eb0.asciidoc000066400000000000000000000005161506101734100266610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:268 [source, python] ---- resp = client.security.put_user( username="cross-cluster-kibana", password="l0ng-r4nd0m-p@ssw0rd", roles=[ "logstash-reader", "kibana-access" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8aa17bd25a3f2d634e5253b4b72fec4c.asciidoc000066400000000000000000000005361506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/explain-dfanalytics.asciidoc:126 [source, python] ---- resp = client.ml.explain_data_frame_analytics( source={ "index": "houses_sold_last_10_yrs" }, analysis={ "regression": { 
"dependent_variable": "price" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8aa74aee3dcf4b34028e4c5e1c1ed27b.asciidoc000066400000000000000000000014411506101734100267640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/flattened.asciidoc:35 [source, python] ---- resp = client.indices.create( index="bug_reports", mappings={ "properties": { "title": { "type": "text" }, "labels": { "type": "flattened" } } }, ) print(resp) resp1 = client.index( index="bug_reports", id="1", document={ "title": "Results are not sorted correctly.", "labels": { "priority": "urgent", "release": [ "v1.2.5", "v1.3.0" ], "timestamp": { "created": 1541458026, "closed": 1541457010 } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/8ab11a25e017124a70484781ca11fb52.asciidoc000066400000000000000000000004661506101734100262320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/detect-threats-with-eql.asciidoc:94 [source, python] ---- resp = client.eql.search( index="my-data-stream", filter_path="-hits.events", query="\n any where process.name == \"regsvr32.exe\" \n ", size=200, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8b07372a21a10a16b52e70fc0c87ad4e.asciidoc000066400000000000000000000006001506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/object.asciidoc:11 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "region": "US", "manager": { "age": 30, "name": { "first": "John", "last": "Smith" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8b301122cbf42be6eafeda714a36559e.asciidoc000066400000000000000000000014551506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/logstash/put-pipeline.asciidoc:80 [source, python] ---- resp = client.logstash.put_pipeline( id="my_pipeline", pipeline={ "description": "Sample pipeline for illustration purposes", "last_modified": "2021-01-02T02:50:51.250Z", "pipeline_metadata": { "type": "logstash_pipeline", "version": "1" }, "username": "elastic", "pipeline": "input {}\n filter { grok {} }\n output {}", "pipeline_settings": { "pipeline.workers": 1, "pipeline.batch.size": 125, "pipeline.batch.delay": 50, "queue.type": "memory", "queue.max_bytes": "1gb", "queue.checkpoint.writes": 1024 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8b38eeb41eb388ee6d92f26b5c0cc48d.asciidoc000066400000000000000000000014241506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:868 [source, python] ---- resp = client.put_script( id="my-prod-tag-script", script={ "lang": "painless", "source": "\n Collection tags = ctx.tags;\n if(tags != null){\n for (String tag : tags) {\n if (tag.toLowerCase().contains('prod')) {\n return false;\n }\n }\n }\n return true;\n " }, ) print(resp) resp1 = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "drop": { "description": "Drop documents that don't contain 'prod' tag", "if": { "id": "my-prod-tag-script" } } } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/8b3a94495127efd9d56b2cd7f3eecdca.asciidoc000066400000000000000000000003061506101734100270130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-role-mappings.asciidoc:70 [source, python] ---- resp = client.security.get_role_mapping( name="mapping1", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/8b5bc6e217b0d33e4c88d84f5c1a0712.asciidoc000066400000000000000000000005151506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/missing-aggregation.asciidoc:12 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "products_without_a_price": { "missing": { "field": "price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8b652e3205a5e9e0187f56ce3c36ae4e.asciidoc000066400000000000000000000005531506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/categorize-text-aggregation.asciidoc:158 [source, python] ---- resp = client.search( index="log-messages", filter_path="aggregations", aggs={ "categories": { "categorize_text": { "field": "message" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8b7956a2b88fd798a895d3466d671b58.asciidoc000066400000000000000000000003761506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/network/tracers.asciidoc:29 [source, python] ---- resp = client.cluster.put_settings( persistent={ "http.tracer.include": "*", "http.tracer.exclude": "" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8b8b6aac2111b2d8b93758ac737e6543.asciidoc000066400000000000000000000012171506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/synthetic-source.asciidoc:224 [source, python] ---- resp = client.indices.create( index="idx_keep", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "path": { "type": "object", "synthetic_source_keep": "all" }, "ids": { "type": "integer", "synthetic_source_keep": "arrays" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8bf1e7a6d529547906ba8b1d6501fa0c.asciidoc000066400000000000000000000005321506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/set-connector-sync-job-error-api.asciidoc:63 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/_sync_job/my-connector-sync-job/_error", headers={"Content-Type": "application/json"}, body={ "error": "some-error" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c2060b0272556457f4871c5d7a589fd.asciidoc000066400000000000000000000006561506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:244 [source, python] ---- resp = client.security.put_role( name="logstash-reader", indices=[ { "names": [ "logstash-*" ], "privileges": [ "read", "view_index_metadata" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c47c80139f40f25db44f5781ca2dfbe.asciidoc000066400000000000000000000003061506101734100265620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:491 [source, python] ---- resp = client.indices.get_alias( index=".ml-anomalies-custom-example", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c5d48252cd6d1ee26a2bb817f89c78e.asciidoc000066400000000000000000000003121506101734100265740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-filter.asciidoc:46 [source, python] ---- resp = client.ml.delete_filter( filter_id="safe_domains", ) print(resp) ---- 
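Delete calls like the one above raise an error if the target filter is missing. A small defensive sketch, assuming the `NotFoundError` exception exported by the `elasticsearch` package:

[source, python]
----
from elasticsearch import NotFoundError

# Deleting a filter that does not exist raises NotFoundError (HTTP 404),
# so treat "already gone" as success when cleaning up.
try:
    client.ml.delete_filter(filter_id="safe_domains")
except NotFoundError:
    print("filter already deleted")
----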
python-elasticsearch-9.1.1/docs/examples/8c619666488927dac6ecb7dcebca44c2.asciidoc000066400000000000000000000007511506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:4 [source, python] ---- resp = client.indices.create( index="cohere-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 1024, "element_type": "byte" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c639d3eef5c2de29e12bd9c6a42d3d4.asciidoc000066400000000000000000000017011506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:738 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "categories": { "terms": { "field": "category.keyword", "size": 5, "order": { "total_revenue": "desc" } }, "aggs": { "total_revenue": { "sum": { "field": "taxful_total_price" } }, "avg_order_value": { "avg": { "field": "taxful_total_price" } }, "total_items": { "sum": { "field": "total_quantity" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c693e057f6e85fbf2b56ca442719362.asciidoc000066400000000000000000000014051506101734100263610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/aggregate-metric-double.asciidoc:161 [source, python] ---- resp = client.search( index="stats-index", size="0", aggs={ "metric_min": { "min": { "field": "agg_metric" } }, "metric_max": { "max": { "field": "agg_metric" } }, "metric_value_count": { "value_count": { "field": "agg_metric" } }, "metric_sum": { "sum": { "field": "agg_metric" } }, "metric_avg": { "avg": { "field": "agg_metric" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c6f3bb8abae9ff1d21e776f16ad1c86.asciidoc000066400000000000000000000017501506101734100270120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/put-dfanalytics.asciidoc:580 [source, python] ---- resp = client.ml.put_data_frame_analytics( id="model-flight-delays-pre", source={ "index": [ "kibana_sample_data_flights" ], "query": { "range": { "DistanceKilometers": { "gt": 0 } } }, "_source": { "includes": [], "excludes": [ "FlightDelay", "FlightDelayType" ] } }, dest={ "index": "df-flight-delays", "results_field": "ml-results" }, analysis={ "regression": { "dependent_variable": "FlightDelayMin", "training_percent": 90 } }, analyzed_fields={ "includes": [], "excludes": [ "FlightNum" ] }, model_memory_limit="100mb", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c8b5224befab7804461c7e7b6086d9a.asciidoc000066400000000000000000000011211506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/id-field.asciidoc:14 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "text": "Document with ID 1" }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "text": "Document with ID 2" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "terms": { "_id": [ "1", "2" ] } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/8c9081dc738d1290fd76071b283fcaec.asciidoc000066400000000000000000000002761506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:98 [source, python] ---- resp = client.get( index="my-index-000001", 
id="2", routing="user1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8c92c5e87facbae8dc4f58376ec21815.asciidoc000066400000000000000000000003651506101734100266650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1038 [source, python] ---- resp = client.search( index="my-index-000001", fields=[ "voltage_corrected", "node" ], size=2, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8cbf9b46ce3ccc966c4902d2e0c56317.asciidoc000066400000000000000000000005411506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc:156 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "keyword_repeat", "stemmer" ], text="fox running and jumping", explain=True, attributes="keyword", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8cd00a3aba7c3c158277bc032aac2830.asciidoc000066400000000000000000000031541506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/bulk.asciidoc:620 [source, python] ---- resp = client.bulk( operations=[ { "update": { "_id": "1", "_index": "index1", "retry_on_conflict": 3 } }, { "doc": { "field": "value" } }, { "update": { "_id": "0", "_index": "index1", "retry_on_conflict": 3 } }, { "script": { "source": "ctx._source.counter += params.param1", "lang": "painless", "params": { "param1": 1 } }, "upsert": { "counter": 1 } }, { "update": { "_id": "2", "_index": "index1", "retry_on_conflict": 3 } }, { "doc": { "field": "value" }, "doc_as_upsert": True }, { "update": { "_id": "3", "_index": "index1", "_source": True } }, { "doc": { "field": "value" } }, { "update": { "_id": "4", "_index": "index1" } }, { "doc": { "field": "value" }, "_source": True } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8cef2b98f3fe3a85874f1b48ebe6ec63.asciidoc000066400000000000000000000016741506101734100267610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/elision-tokenfilter.asciidoc:165 [source, python] ---- resp = client.indices.create( index="elision_case_insensitive_example", settings={ "analysis": { "analyzer": { "default": { "tokenizer": "whitespace", "filter": [ "elision_case_insensitive" ] } }, "filter": { "elision_case_insensitive": { "type": "elision", "articles": [ "l", "m", "t", "qu", "n", "s", "j" ], "articles_case": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d05862be1f9e7edaba162b1888b5677.asciidoc000066400000000000000000000025261506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:50 [source, python] ---- resp = client.indices.put_mapping( index="cooking_blog", properties={ "title": { "type": "text", "analyzer": "standard", "fields": { "keyword": { "type": "keyword", "ignore_above": 256 } } }, "description": { "type": "text", "fields": { "keyword": { "type": "keyword" } } }, "author": { "type": "text", "fields": { "keyword": { "type": "keyword" } } }, "date": { "type": "date", "format": "yyyy-MM-dd" }, "category": { "type": "text", "fields": { "keyword": { "type": "keyword" } } }, "tags": { "type": "text", "fields": { "keyword": { "type": "keyword" } } }, "rating": { "type": "float" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/8d064eda2199de52e5be9ee68a5b7c68.asciidoc000066400000000000000000000011271506101734100266660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/generate-embeddings.asciidoc:17 [source, python] ---- resp = client.ingest.put_pipeline( id="my-text-embeddings-pipeline", description="Text embedding pipeline", processors=[ { "inference": { "model_id": ".elser_model_2", "input_output": [ { "input_field": "my_text_field", "output_field": "my_tokens" } ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d421c5bec38eecce4679b219cacc9db.asciidoc000066400000000000000000000013531506101734100270660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-rank-aggregation.asciidoc:128 [source, python] ---- resp = client.search( index="latency", size=0, runtime_mappings={ "load_time.seconds": { "type": "long", "script": { "source": "emit(doc['load_time'].value / params.timeUnit)", "params": { "timeUnit": 1000 } } } }, aggs={ "load_time_ranks": { "percentile_ranks": { "values": [ 500, 600 ], "field": "load_time.seconds" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d4ca17349e7e82c329cdd854cc670a1.asciidoc000066400000000000000000000003301506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:184 [source, python] ---- resp = client.security.put_role( name="remote-search", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d4dda5d988d568f4f4210a6387e026f.asciidoc000066400000000000000000000004101506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-logout-api.asciidoc:72 [source, python] ---- resp = client.security.saml_logout( token="46ToAxZVaXVVZTVKOVF5YU04ZFJVUDVSZlV3", refresh_token="mJdXLtmvTUSpoLwMvdBt_w", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d6631b622f9bfb8fa70154f6fb8b153.asciidoc000066400000000000000000000003331506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:188 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", q="kimchy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d7193902a353872740a3324c60c5001.asciidoc000066400000000000000000000006561506101734100257470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/index-sorting.asciidoc:113 [source, python] ---- resp = client.indices.create( index="events", settings={ "index": { "sort.field": "timestamp", "sort.order": "desc" } }, mappings={ "properties": { "timestamp": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8d9b04f2a97f4229dec9e620126de049.asciidoc000066400000000000000000000003601506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-s3.asciidoc:609 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.com.amazonaws.request": "DEBUG" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8db799543eb084ec71547980863d60b9.asciidoc000066400000000000000000000012651506101734100262370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/sum-bucket-aggregation.asciidoc:42 [source, python] ---- resp = client.search( index="sales", 
size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "sum_monthly_sales": { "sum_bucket": { "buckets_path": "sales_per_month>sales" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8de6fed6ba2b94ce6a12ce076be2b4d7.asciidoc000066400000000000000000000002321506101734100270540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/segments.asciidoc:132 [source, python] ---- resp = client.cat.segments( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e06d8b2b737c43806018eae2ca061c1.asciidoc000066400000000000000000000006201506101734100263770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/string-stats-aggregation.asciidoc:178 [source, python] ---- resp = client.search( index="my-index-000001", size="0", aggs={ "message_stats": { "string_stats": { "field": "message.keyword", "missing": "[empty message]" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e0f43829df9af20547ea6896f4c0124.asciidoc000066400000000000000000000010721506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:327 [source, python] ---- resp = client.ilm.put_lifecycle( name="rollover_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_size": "50gb" } } }, "delete": { "min_age": "1d", "actions": { "delete": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e208098a0156c4c92afe0a06960b230.asciidoc000066400000000000000000000006351506101734100262450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-authenticate-api.asciidoc:89 [source, python] ---- resp = client.security.saml_authenticate( content="PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....", ids=[ "4fee3b046395c4e751011e97f8900b5273d56685" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e286a205a1f84f888a6d99f2620c80e.asciidoc000066400000000000000000000003541506101734100263640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/logging-config.asciidoc:272 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.deprecation": "OFF" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e2bbef535fef688d397e60e09aefa7f.asciidoc000066400000000000000000000004121506101734100270270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:206 [source, python] ---- resp = client.indices.stats( metric="indexing,search", level="shards", human=True, expand_wildcards="all", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e42a17edace2bc6e42c6a1532779937.asciidoc000066400000000000000000000004671506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/max-aggregation.asciidoc:17 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "max_price": { "max": { "field": "price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e43bb5b7946143e69d397bb81d87df0.asciidoc000066400000000000000000000003021506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/get-follow-stats.asciidoc:225 [source, 
python] ---- resp = client.ccr.follow_stats( index="follower_index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e68cdfad45e7e6dff254d931eea29d4.asciidoc000066400000000000000000000053441506101734100270330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:687 [source, python] ---- resp = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": {} }, { "@timestamp": "2020-06-21T15:00:01-05:00", "message": "211.11.9.0 - - [2020-06-21T15:00:01-05:00] \"GET /english/index.html HTTP/1.0\" 304 0" }, { "index": {} }, { "@timestamp": "2020-06-21T15:00:01-05:00", "message": "211.11.9.0 - - [2020-06-21T15:00:01-05:00] \"GET /english/index.html HTTP/1.0\" 304 0" }, { "index": {} }, { "@timestamp": "2020-04-30T14:30:17-05:00", "message": "40.135.0.0 - - [2020-04-30T14:30:17-05:00] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "@timestamp": "2020-04-30T14:30:53-05:00", "message": "232.0.0.0 - - [2020-04-30T14:30:53-05:00] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:12-05:00", "message": "26.1.0.0 - - [2020-04-30T14:31:12-05:00] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:19-05:00", "message": "247.37.0.0 - - [2020-04-30T14:31:19-05:00] \"GET /french/splash_inet.html HTTP/1.0\" 200 3781" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:27-05:00", "message": "252.0.0.0 - - [2020-04-30T14:31:27-05:00] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:29-05:00", "message": "247.37.0.0 - - [2020-04-30T14:31:29-05:00] \"GET /images/hm_brdl.gif HTTP/1.0\" 304 0" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:29-05:00", "message": "247.37.0.0 - - [2020-04-30T14:31:29-05:00] \"GET /images/hm_arw.gif HTTP/1.0\" 304 0" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:32-05:00", "message": "247.37.0.0 - - [2020-04-30T14:31:32-05:00] \"GET /images/nav_bg_top.gif HTTP/1.0\" 200 929" }, { "index": {} }, { "@timestamp": "2020-04-30T14:31:43-05:00", "message": "247.37.0.0 - - [2020-04-30T14:31:43-05:00] \"GET /french/images/nav_venue_off.gif HTTP/1.0\" 304 0" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e89fee0be6a436c4e3d7c152659c47e.asciidoc000066400000000000000000000010531506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-scheduling-api.asciidoc:96 [source, python] ---- resp = client.connector.update_scheduling( connector_id="my-connector", scheduling={ "access_control": { "enabled": True, "interval": "0 10 0 * * ?" }, "full": { "enabled": True, "interval": "0 20 0 * * ?" }, "incremental": { "enabled": False, "interval": "0 30 0 * * ?" 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e92b10ebcfedc76562ab52d0e46b916.asciidoc000066400000000000000000000003071506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:234 [source, python] ---- resp = client.delete_script( id="my-search-template", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e9e7dc5fad2b2b8e74ab4dc225d9c53.asciidoc000066400000000000000000000003161506101734100270060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/common/apis/set-upgrade-mode.asciidoc:102 [source, python] ---- resp = client.ml.set_upgrade_mode( enabled=False, timeout="10m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8e9f7261af6264c92d0eb4d586a176f9.asciidoc000066400000000000000000000007601506101734100264500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/lowercase-tokenfilter.asciidoc:82 [source, python] ---- resp = client.indices.create( index="lowercase_example", settings={ "analysis": { "analyzer": { "whitespace_lowercase": { "tokenizer": "whitespace", "filter": [ "lowercase" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8eac28d2e9b6482b413d61817456a14f.asciidoc000066400000000000000000000010561506101734100263450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:272 [source, python] ---- resp = client.search( aggs={ "genres": { "terms": { "field": "genre", "order": { "max_play_count": "desc" } }, "aggs": { "max_play_count": { "max": { "field": "play_count" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8ecefdcf8f153cf91588e9fdde8f3e6b.asciidoc000066400000000000000000000005651506101734100272120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:299 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "content", "name^5" ], "query": "this AND that OR thus", "tie_breaker": 0 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8ed31628081db2b6e9106d61d1e142be.asciidoc000066400000000000000000000004661506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/simple-query-string-query.asciidoc:291 [source, python] ---- resp = client.search( query={ "simple_query_string": { "query": "ny city", "auto_generate_synonyms_phrase_query": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8edcd80d9b545a222dcc2f25ca4c6d5f.asciidoc000066400000000000000000000011321506101734100267700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:455 [source, python] ---- resp = client.search_application.search( name="my-search-app", params={ "query_string": "What is the most popular brand of coffee sold in the United States?", "elser_fields": [ "title", "meta_description" ], "text_fields": [ "title", "meta_description" ], "rrf": { "rank_window_size": 50, "rank_constant": 25 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8ee9521f57661a050efb614f02b4a090.asciidoc000066400000000000000000000004261506101734100263300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:58 [source, python] ---- resp = client.search( aggs={ "genres": { "terms": { "field": "genre" } } }, ) 
print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f0c5c81cdb902c136db821947ee70a1.asciidoc000066400000000000000000000011151506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/min-aggregation.asciidoc:53 [source, python] ---- resp = client.search( index="sales", size=0, runtime_mappings={ "price.adjusted": { "type": "double", "script": "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n " } }, aggs={ "min_price": { "min": { "field": "price.adjusted" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f2875d976332cf5da8fb7764097a307.asciidoc000066400000000000000000000007461506101734100263210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc:112 [source, python] ---- resp = client.indices.put_index_template( name="template", index_patterns=[ "my-data-stream*" ], data_stream={}, priority=500, template={ "lifecycle": { "data_retention": "7d" } }, meta={ "description": "Template with data stream lifecycle" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f4a7f68f2ca3698abdf20026a2d8c5f.asciidoc000066400000000000000000000003251506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/high-cpu-usage.asciidoc:81 [source, python] ---- resp = client.tasks.list( actions="*search", detailed=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f6f7ea5abf56152b4a5639ddf40848f.asciidoc000066400000000000000000000010561506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/jwt-realm.asciidoc:471 [source, python] ---- resp = client.security.put_role_mapping( name="native1_users", refresh=True, roles=[ "user" ], rules={ "all": [ { "field": { "realm.name": "native1" } }, { "field": { "username": "principalname1" } } ] }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f7936f219500305e5b2518dbbf949ea.asciidoc000066400000000000000000000004061506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:742 [source, python] ---- resp = client.async_search.status( id="FmpwbThueVB4UkRDeUxqb1l4akIza3cbWEJyeVBPQldTV3FGZGdIeUVabXBldzoyMDIw", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f9a3fcd17a111f63caa3bef6e5f00f2.asciidoc000066400000000000000000000004751506101734100267720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:782 [source, python] ---- resp = client.search( aggs={ "tags": { "terms": { "field": "tags", "execution_hint": "map" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8f9f88cf9a27c1138226efb94ac09e73.asciidoc000066400000000000000000000004001506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/ip.asciidoc:112 [source, python] ---- resp = client.search( index="my-index-000001", query={ "term": { "ip_addr": "192.168.0.0/16" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8fe128323a944765f525c76d85af7a2f.asciidoc000066400000000000000000000011271506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/random-sampler-aggregation.asciidoc:25 [source, python] ---- resp = 
client.search( index="kibana_sample_data_ecommerce", size="0", track_total_hits=False, aggregations={ "sampling": { "random_sampler": { "probability": 0.1 }, "aggs": { "price_percentiles": { "percentiles": { "field": "taxful_total_price" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/8fec06a98d0151c1d717a01491d0b8f0.asciidoc000066400000000000000000000006151506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:79 [source, python] ---- resp = client.index( index="dsl-data-stream", document={ "@timestamp": "2023-10-18T16:21:15.000Z", "message": "192.0.2.42 - - [06/May/2099:16:21:15 +0000] \"GET /images/bg.jpg HTTP/1.0\" 200 24736" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/90083d93e46fad2524755b8d4d1306fc.asciidoc000066400000000000000000000010031506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/set-connector-sync-job-stats-api.asciidoc:81 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/_sync_job/my-connector-sync-job/_stats", headers={"Content-Type": "application/json"}, body={ "deleted_document_count": 10, "indexed_document_count": 20, "indexed_document_volume": 1000, "total_document_count": 2000, "last_seen": "2023-01-02T10:00:00Z" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/901d66919e584515717bf78ab5ca2cbb.asciidoc000066400000000000000000000012771506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/daterange-aggregation.asciidoc:276 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "range": { "date_range": { "field": "date", "time_zone": "CET", "ranges": [ { "to": "2016/02/01" }, { "from": "2016/02/01", "to": "now/d" }, { "from": "now/d" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/902cfd5aeec2f65b3adf55f5e38b21f0.asciidoc000066400000000000000000000003721506101734100267760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:117 [source, python] ---- resp = client.index( index="kibana_sample_data_ecommerce2", document={ "user": "kimchy" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9054187cbab5c9e1c4ca2a4dba6a5db0.asciidoc000066400000000000000000000002131506101734100267460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/info.asciidoc:57 [source, python] ---- resp = client.xpack.info() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/90631797c7fbda43902abf2cc0ea8304.asciidoc000066400000000000000000000003341506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // shard-request-cache.asciidoc:132 [source, python] ---- resp = client.nodes.stats( metric="indices", index_metric="request_cache", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/908326e14ad76c2ff04a9b6d8365751f.asciidoc000066400000000000000000000011251506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:872 [source, python] ---- resp = client.search( index="passage_vectors", fields=[ "creation_time", "full_text" ], source=False, knn={ "query_vector": [ 0.45, 45 ], "field": "paragraph.vector", "k": 2, "num_candidates": 2, "inner_hits": { "_source": False, "fields": [ "paragraph.text" ], "size": 1 } 
}, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/909a032a9c1f7095b798444705b09ad6.asciidoc000066400000000000000000000004351506101734100262110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:443 [source, python] ---- resp = client.index( index="example", document={ "location": "GEOMETRYCOLLECTION (POINT (100.0 0.0), LINESTRING (101.0 0.0, 102.0 1.0))" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/90c087560ea6c0b7405f710971c86ef0.asciidoc000066400000000000000000000014251506101734100262620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc:119 [source, python] ---- resp = client.ccr.put_auto_follow_pattern( name="my_auto_follow_pattern", remote_cluster="remote_cluster", leader_index_patterns=[ "leader_index*" ], follow_index_pattern="{{leader_index}}-follower", settings={ "index.number_of_replicas": 0 }, max_read_request_operation_count=1024, max_outstanding_read_requests=16, max_read_request_size="1024k", max_write_request_operation_count=32768, max_write_request_size="16k", max_outstanding_write_requests=8, max_write_buffer_count=512, max_write_buffer_size="512k", max_retry_delay="10s", read_poll_timeout="30s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/90e06d5ec5e454832d8fbd2e73ec2248.asciidoc000066400000000000000000000003221506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/delete-autoscaling-policy.asciidoc:85 [source, python] ---- resp = client.autoscaling.delete_autoscaling_policy( name="*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/90f1f5304922fb6d097846dd1444c075.asciidoc000066400000000000000000000011641506101734100262070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/example-watches/example-watch-clusterstatus.asciidoc:137 [source, python] ---- resp = client.watcher.put_watch( id="cluster_health_watch", trigger={ "schedule": { "interval": "10s" } }, input={ "http": { "request": { "host": "localhost", "port": 9200, "path": "/_cluster/health" } } }, condition={ "compare": { "ctx.payload.status": { "eq": "red" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9116ee8a5b00cc877291ed5559563f24.asciidoc000066400000000000000000000012531506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/ack-watch.asciidoc:68 [source, python] ---- resp = client.watcher.put_watch( id="my_watch", trigger={ "schedule": { "yearly": { "in": "february", "on": 29, "at": "noon" } } }, input={ "simple": { "payload": { "send": "yes" } } }, condition={ "always": {} }, actions={ "test_index": { "throttle_period": "15m", "index": { "index": "test" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/911c56114e50ce7440eb83efc91d28b8.asciidoc000066400000000000000000000006451506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:223 [source, python] ---- resp = client.indices.put_mapping( index="my-data-stream", properties={ "host": { "properties": { "ip": { "type": "ip", "ignore_malformed": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9120b6a49ec39a1571339fddf8e1a26f.asciidoc000066400000000000000000000004721506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:466 [source, 
python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "set": { "field": "my-long-field", "value": 10 } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91270cef57ac455547ffd47839420887.asciidoc000066400000000000000000000021101506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/rate-aggregation.asciidoc:175 [source, python] ---- resp = client.search( index="sales", filter_path="aggregations", size="0", aggs={ "buckets": { "composite": { "sources": [ { "month": { "date_histogram": { "field": "date", "calendar_interval": "month" } } }, { "type": { "terms": { "field": "type" } } } ] }, "aggs": { "avg_price": { "rate": { "field": "price", "unit": "day" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9129dec88d35571b3166c6677297f03b.asciidoc000066400000000000000000000003221506101734100262210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/get-transform.asciidoc:115 [source, python] ---- resp = client.transform.get_transform( transform_id="ecommerce_transform1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9138550002cb26ab64918cce427963b8.asciidoc000066400000000000000000000005631506101734100262030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template.asciidoc:277 [source, python] ---- resp = client.indices.put_index_template( name="template_1", index_patterns=[ "foo", "bar" ], priority=0, template={ "settings": { "number_of_shards": 1 } }, version=123, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/913c163c197802078a8af72150178061.asciidoc000066400000000000000000000015601506101734100257550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/derivative-aggregation.asciidoc:136 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } }, "sales_deriv": { "derivative": { "buckets_path": "sales" } }, "sales_2nd_deriv": { "derivative": { "buckets_path": "sales_deriv" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9143be4f137574271953a7a8107e175b.asciidoc000066400000000000000000000003531506101734100261240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-user-profile.asciidoc:69 [source, python] ---- resp = client.security.get_user_profile( uid="u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9169d19a80175ec94f80865d0f9bef4c.asciidoc000066400000000000000000000021711506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:314 [source, python] ---- resp = client.search( index="restaurants", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "multi_match": { "query": "Austria", "fields": [ "city", "region" ] } } } }, { "knn": { "field": "vector", "query_vector": [ 10, 22, 77 ], "k": 10, "num_candidates": 10 } } ], "rank_constant": 1, "rank_window_size": 50 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91750571c195718f0ff246e058e4bc63.asciidoc000066400000000000000000000006151506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:73 [source, python] ---- 
resp = client.search( index="twitter", query={ "match": { "title": "elasticsearch" } }, sort=[ { "date": "asc" }, { "tie_breaker_id": "asc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91c01fcad9bf341d039a15dfc593dcd7.asciidoc000066400000000000000000000004751506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/field-caps.asciidoc:310 [source, python] ---- resp = client.field_caps( index="my-index-*", fields="rating", index_filter={ "range": { "@timestamp": { "gte": "2018" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91c925fc71abe0ddfe52457e9130363b.asciidoc000066400000000000000000000005261506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/grant-api-keys.asciidoc:178 [source, python] ---- resp = client.security.grant_api_key( grant_type="password", username="test_admin", password="x-pack-test-password", run_as="test_user", api_key={ "name": "another-api-key" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91cbeeda86b4e4e393fc79d4e3a4a781.asciidoc000066400000000000000000000011211506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/sampler-aggregation.asciidoc:91 [source, python] ---- resp = client.search( index="stackoverflow", size="0", query={ "query_string": { "query": "tags:kibana OR tags:javascript" } }, aggs={ "low_quality_keywords": { "significant_terms": { "field": "tags", "size": 3, "exclude": [ "kibana", "javascript" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91e106a2affbc8df32cd940684a779ed.asciidoc000066400000000000000000000005171506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/put-ip-location-database.asciidoc:22 [source, python] ---- resp = client.ingest.put_ip_location_database( id="my-database-1", configuration={ "name": "GeoIP2-Domain", "maxmind": { "account_id": "1234567" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/91ed08faaed54cb5ace9a295af937439.asciidoc000066400000000000000000000010021506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations.asciidoc:337 [source, python] ---- resp = client.search( index="my-index-000001", size="0", runtime_mappings={ "message.length": { "type": "long", "script": "emit(doc['message.keyword'].value.length())" } }, aggs={ "message_length": { "histogram": { "interval": 10, "field": "message.length" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9200ed8d5f798a158def4c526e41269e.asciidoc000066400000000000000000000003031506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/field-caps.asciidoc:191 [source, python] ---- resp = client.field_caps( index="my-index-000001", fields="rating", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/92035a2a62d01a511662af65606d5fc6.asciidoc000066400000000000000000000011311506101734100262320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/bucket-sort-aggregation.asciidoc:142 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "bucket_truncate": { "bucket_sort": { "from": 1, "size": 1 } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/9216e8e544e6d193eda1f59e9160a225.asciidoc000066400000000000000000000012751506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-near-query.asciidoc:12 [source, python] ---- resp = client.search( query={ "span_near": { "clauses": [ { "span_term": { "field": "value1" } }, { "span_term": { "field": "value2" } }, { "span_term": { "field": "value3" } } ], "slop": 12, "in_order": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/922529276f87cb9d116be2468d108466.asciidoc000066400000000000000000000005531506101734100261440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/specify-analyzer.asciidoc:74 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "default": { "type": "simple" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9225841fdcddaf83ebdb90c2b0399e20.asciidoc000066400000000000000000000002771506101734100266430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/get-trained-models-stats.asciidoc:412 [source, python] ---- resp = client.ml.get_trained_models_stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/92284d24bbb80ce6943f2ddcbf74b833.asciidoc000066400000000000000000000012161506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/flattened.asciidoc:136 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "flattened_field": { "type": "flattened" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", refresh=True, document={ "flattened_field": { "subfield": "value" } }, ) print(resp1) resp2 = client.search( index="my-index-000001", fields=[ "flattened_field.subfield" ], source=False, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/923aee95078219ee6eb321a252e1121b.asciidoc000066400000000000000000000007351506101734100263300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/ngram-tokenfilter.asciidoc:161 [source, python] ---- resp = client.indices.create( index="ngram_example", settings={ "analysis": { "analyzer": { "standard_ngram": { "tokenizer": "standard", "filter": [ "ngram" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9250ac57ec81d5192e8ad4c462438489.asciidoc000066400000000000000000000022261506101734100262770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-jinaai.asciidoc:204 [source, python] ---- resp = client.bulk( index="jinaai-index", operations=[ { "index": { "_index": "jinaai-index", "_id": "1" } }, { "content": "Sarah Johnson is a talented marine biologist working at the Oceanographic Institute. Her groundbreaking research on coral reef ecosystems has garnered international attention and numerous accolades." }, { "index": { "_index": "jinaai-index", "_id": "2" } }, { "content": "She spends months at a time diving in remote locations, meticulously documenting the intricate relationships between various marine species. " }, { "index": { "_index": "jinaai-index", "_id": "3" } }, { "content": "Her dedication to preserving these delicate underwater environments has inspired a new generation of conservationists." 
} ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/926c0134aeaad53bd0f3bdad9c430217.asciidoc000066400000000000000000000004151506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:769 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "text": "words words", "flag": "foo" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9270964d35d172ea5b193c5fc7a473dd.asciidoc000066400000000000000000000003031506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/templates.asciidoc:67 [source, python] ---- resp = client.cat.templates( name="my-template-*", v=True, s="name", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/927b20a221f975b75d1227b67d0eb7e2.asciidoc000066400000000000000000000006461506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:268 [source, python] ---- resp = client.esql.query( query="\n FROM library\n | EVAL year = DATE_EXTRACT(\"year\", release_date)\n | WHERE page_count > ? AND author == ?\n | STATS count = COUNT(*) by year\n | WHERE count > ?\n | LIMIT 5\n ", params=[ 300, "Frank Herbert", 0 ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9298aaf8232a819e79b3bf8471245e98.asciidoc000066400000000000000000000003141506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-job-stats.asciidoc:381 [source, python] ---- resp = client.ml.get_job_stats( job_id="low_request_rate", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/92d0c12d53a900308150d572c3f2f82f.asciidoc000066400000000000000000000007451506101734100262460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:477 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "strings_as_keywords": { "match_mapping_type": "string", "mapping": { "type": "keyword" } } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/92d343eb755971c44a939d0660bf5ac2.asciidoc000066400000000000000000000005461506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/refresh.asciidoc:87 [source, python] ---- resp = client.index( index="test", id="1", refresh=True, document={ "test": "test" }, ) print(resp) resp1 = client.index( index="test", id="2", refresh=True, document={ "test": "test" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/92f073762634a4b2274f71002494192e.asciidoc000066400000000000000000000003341506101734100257550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/add-nodes.asciidoc:152 [source, python] ---- resp = client.cluster.state( filter_path="metadata.cluster_coordination.voting_config_exclusions", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/92fa6608673cec5a2ed568a07e80d36b.asciidoc000066400000000000000000000004651506101734100265140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1549 [source, python] ---- resp = client.search( index="my-index-000001", query={ "range": { "timestamp": { "gte": "2020-04-30T14:31:27-05:00" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/92fe53019958ba466d1272da0834cf53.asciidoc000066400000000000000000000002541506101734100262650ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // indices/stats.asciidoc:17 [source, python] ---- resp = client.indices.stats( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/930a3c5667e3bf47b4e8cc28e7bf8d5f.asciidoc000066400000000000000000000014051506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/run-as-privilege.asciidoc:114 [source, python] ---- resp = client.security.put_role( name="my_admin_role", refresh=True, cluster=[ "manage" ], indices=[ { "names": [ "index1", "index2" ], "privileges": [ "manage" ] } ], applications=[ { "application": "myapp", "privileges": [ "admin", "read" ], "resources": [ "*" ] } ], run_as=[ "analyst_user" ], metadata={ "version": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/930ba37af73dd5ff0342ecfe6c60a4e9.asciidoc000066400000000000000000000005151506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/extendedstats-aggregation.asciidoc:14 [source, python] ---- resp = client.search( index="exams", size=0, aggs={ "grades_stats": { "extended_stats": { "field": "grade" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9313f534e1aa266cde7d4af74665497f.asciidoc000066400000000000000000000005261506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-zoom.asciidoc:219 [source, python] ---- resp = client.connector.put( connector_id="my-{service-name-stub}-connector", index_name="my-elasticsearch-index", name="Content synced from {service-name}", service_type="{service-name-stub}", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/931817b168e055ecf738785c721125dd.asciidoc000066400000000000000000000022421506101734100262140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/inference.asciidoc:750 [source, python] ---- resp = client.ingest.put_pipeline( id="query_helper_pipeline", processors=[ { "script": { "source": "ctx.prompt = 'Please generate an elasticsearch search query on index `articles_index` for the following natural language query. Dates are in the field `@timestamp`, document types are in the field `type` (options are `news`, `publication`), categories in the field `category` and can be multiple (options are `medicine`, `pharmaceuticals`, `technology`), and document names are in the field `title` which should use a fuzzy match. 
Ignore fields which cannot be determined from the natural language query context: ' + ctx.content" } }, { "inference": { "model_id": "openai_chat_completions", "input_output": { "input_field": "prompt", "output_field": "query" } } }, { "remove": { "field": "prompt" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/931da02a06953a768f4ad3fecfd7b2df.asciidoc000066400000000000000000000004601506101734100267170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/total-shards-per-node.asciidoc:147 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", name="index.routing.allocation.total_shards_per_node", flat_settings=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9326e323f7ffde678fa04d2d1de3d3bc.asciidoc000066400000000000000000000010771506101734100267310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:603 [source, python] ---- resp = client.search( index="alibabacloud-ai-search-embeddings", knn={ "field": "content_embedding", "query_vector_builder": { "text_embedding": { "model_id": "alibabacloud_ai_search_embeddings", "model_text": "Calculate fuel cost" } }, "k": 10, "num_candidates": 100 }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9334ccd09548b585cd637d7c66c5ae65.asciidoc000066400000000000000000000023471506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/filter-search-results.asciidoc:244 [source, python] ---- resp = client.search( query={ "match": { "message": { "operator": "or", "query": "the quick brown" } } }, rescore=[ { "window_size": 100, "query": { "rescore_query": { "match_phrase": { "message": { "query": "the quick brown", "slop": 2 } } }, "query_weight": 0.7, "rescore_query_weight": 1.2 } }, { "window_size": 10, "query": { "score_mode": "multiply", "rescore_query": { "function_score": { "script_score": { "script": { "source": "Math.log10(doc.count.value + 2)" } } } } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/93429d2bfbc0a9b7a4854b27e34658cf.asciidoc000066400000000000000000000006021506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:23 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "message": { "type": "text" }, "query": { "type": "percolator" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/93444b445446c1a6033347d6267253d6.asciidoc000066400000000000000000000004541506101734100257620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-phrase-prefix-query.asciidoc:22 [source, python] ---- resp = client.search( query={ "match_phrase_prefix": { "message": { "query": "quick brown f" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/934aa38c3adcc4cf74ea40cd8736876c.asciidoc000066400000000000000000000005331506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:178 [source, python] ---- resp = client.indices.create( index="test", settings={ "number_of_shards": 1 }, mappings={ "properties": { "field1": { "type": "text" } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/934ced0998552cc95a28e48554147e8b.asciidoc000066400000000000000000000004031506101734100263060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:582 [source, python] ---- resp = client.cluster.allocation_explain( index="my-index", shard=0, primary=False, current_node="my-node", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/935566d5426d44ade486a49ec5289741.asciidoc000066400000000000000000000005031506101734100262250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-text-hybrid-search:76 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 10 }, dest={ "index": "semantic-embeddings" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/935ee7c1b86ba9592604834bb673c7a3.asciidoc000066400000000000000000000035171506101734100263610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geotilegrid-aggregation.asciidoc:38 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": 1 } }, { "location": "POINT (4.912350 52.374081)", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (4.901618 52.369219)", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 } }, { "location": "POINT (4.914722 52.371667)", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (4.405200 51.222900)", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (2.336389 48.861111)", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (2.327000 48.860000)", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( index="museums", size="0", aggregations={ "large-grid": { "geotile_grid": { "field": "location", "precision": 8 } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/936d809c848f8b77d5b55f57f0aab89a.asciidoc000066400000000000000000000005741506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/field-mapping.asciidoc:81 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "date_detection": False }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "create_date": "2015/09/02" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/937089157fc82cf08b68a954d0e6d52c.asciidoc000066400000000000000000000005151506101734100263640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:240 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n sequence with maxspan=1h\n [ process where process.name == \"regsvr32.exe\" ]\n [ file where stringContains(file.name, \"scrobj.dll\") ]\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9370e4935ab6678571d3227973b8c830.asciidoc000066400000000000000000000003271506101734100260650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:37 [source, python] ---- resp = client.indices.get( index="_all", filter_path="*.aliases", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/937ffc65cbb20505a8aba25b37a796a5.asciidoc000066400000000000000000000011451506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/boolean.asciidoc:22 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "is_published": { "type": "boolean" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", refresh=True, document={ "is_published": "true" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "term": { "is_published": True } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/9382f022086c692ba05efb0acae65946.asciidoc000066400000000000000000000005221506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/synthetic-source.asciidoc:63 [source, python] ---- resp = client.index( index="idx", id="1", document={ "foo": [ { "bar": 1 }, { "bar": 2 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9399cbbd133ec2b7aad2820fa617ae3a.asciidoc000066400000000000000000000006341506101734100267050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/children-aggregation.asciidoc:16 [source, python] ---- resp = client.indices.create( index="child_example", mappings={ "properties": { "join": { "type": "join", "relations": { "question": "answer" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/93bd651aff81daa2b86f9f2089e6d088.asciidoc000066400000000000000000000011411506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:49 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "my_id": "1", "text": "This is a question", "my_join_field": { "name": "question" } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "my_id": "2", "text": "This is another question", "my_join_field": { "name": "question" } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/93cd0fdd5ca22838db06aa1cabdbe8bd.asciidoc000066400000000000000000000010571506101734100272000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:139 [source, python] ---- resp = client.search( index="hugging-face-embeddings", knn={ "field": "content_embedding", "query_vector_builder": { "text_embedding": { "model_id": "hugging_face_embeddings", "model_text": "What's margin of error?" 
} }, "k": 10, "num_candidates": 100 }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/93d7ba4130722cae04f9690e52a8f54f.asciidoc000066400000000000000000000007171506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:459 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "envelope", "coordinates": [ [ 100, 1 ], [ 101, 0 ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/93fb59d3204f37af952198b331fb6bb7.asciidoc000066400000000000000000000003511506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:223 [source, python] ---- resp = client.tasks.get( task_id="oTUltX4IQMOUUVeiohTt8A:12345", wait_for_completion=True, timeout="10s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9403764e6eccad7b321b65e9a10c5727.asciidoc000066400000000000000000000005431506101734100264200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:543 [source, python] ---- resp = client.search( aggs={ "tags": { "terms": { "field": "tags", "include": ".*sport.*", "exclude": "water_.*" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/940e8c2c7ff92d71f489bdb7183c1ce6.asciidoc000066400000000000000000000002501506101734100265770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/segments.asciidoc:116 [source, python] ---- resp = client.indices.segments( index="test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9410af79177dd1df9b7b16229a581e18.asciidoc000066400000000000000000000003511506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/change-password.asciidoc:76 [source, python] ---- resp = client.security.change_password( username="jacknich", password="new-test-password", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/941c8d05486200e835d97642e4ee05d5.asciidoc000066400000000000000000000021171506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:183 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "text": { "type": "text", "term_vector": "with_positions_offsets_payloads", "store": True, "analyzer": "fulltext_analyzer" }, "fullname": { "type": "text", "term_vector": "with_positions_offsets_payloads", "analyzer": "fulltext_analyzer" } } }, settings={ "index": { "number_of_shards": 1, "number_of_replicas": 0 }, "analysis": { "analyzer": { "fulltext_analyzer": { "type": "custom", "tokenizer": "whitespace", "filter": [ "lowercase", "type_as_payload" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/94246f45025ed394cd6415ed8d7a0588.asciidoc000066400000000000000000000002551506101734100263010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/delete-job.asciidoc:85 [source, python] ---- resp = client.rollup.delete_job( id="sensor", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/944806221eb89f5af2298ccdf2902277.asciidoc000066400000000000000000000002621506101734100262760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-caps.asciidoc:171 [source, python] ---- resp = client.rollup.get_rollup_caps( id="_all", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/944a2dc22dae2a8503299926326a9c18.asciidoc000066400000000000000000000011151506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/ip.asciidoc:11 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "ip_addr": { "type": "ip" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "ip_addr": "192.168.1.1" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "term": { "ip_addr": "192.168.0.0/16" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/946522c26d02bebf5c527ba28e55c724.asciidoc000066400000000000000000000003071506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:358 [source, python] ---- resp = client.update_by_query( index="my-index-000001", routing="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9467e52087a13b63b02d78c35ff6f798.asciidoc000066400000000000000000000003661506101734100263120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-phrase-query.asciidoc:11 [source, python] ---- resp = client.search( query={ "match_phrase": { "message": "this is a test" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/947efe87db7f8813c0878f8affc3e2d1.asciidoc000066400000000000000000000002421506101734100266750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/resolve-cluster.asciidoc:83 [source, python] ---- resp = client.indices.resolve_cluster() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/948418e0ef1b7e7cfee2f11be715d7d2.asciidoc000066400000000000000000000045411506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:715 [source, python] ---- resp = client.indices.create( index="retrievers_example_nested", settings={ "number_of_shards": 1 }, mappings={ "properties": { "nested_field": { "type": "nested", "properties": { "paragraph_id": { "type": "keyword" }, "nested_vector": { "type": "dense_vector", "dims": 3, "similarity": "l2_norm", "index": True, "index_options": { "type": "flat" } } } }, "topic": { "type": "keyword" } } }, ) print(resp) resp1 = client.index( index="retrievers_example_nested", id="1", document={ "nested_field": [ { "paragraph_id": "1a", "nested_vector": [ -1.12, -0.59, 0.78 ] }, { "paragraph_id": "1b", "nested_vector": [ -0.12, 1.56, 0.42 ] }, { "paragraph_id": "1c", "nested_vector": [ 1, -1, 0 ] } ], "topic": [ "ai" ] }, ) print(resp1) resp2 = client.index( index="retrievers_example_nested", id="2", document={ "nested_field": [ { "paragraph_id": "2a", "nested_vector": [ 0.23, 1.24, 0.65 ] } ], "topic": [ "information_retrieval" ] }, ) print(resp2) resp3 = client.index( index="retrievers_example_nested", id="3", document={ "topic": [ "ai" ] }, ) print(resp3) resp4 = client.indices.refresh( index="retrievers_example_nested", ) print(resp4) ---- python-elasticsearch-9.1.1/docs/examples/94cd66bf93f99881c1bda547283a0357.asciidoc000066400000000000000000000016171506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:306 [source, python] ---- resp = client.bulk( index="quantized-image-index", refresh=True, operations=[ { "index": { "_id": "1" } }, { "image-vector": [ 0.1, -2 ], "title": "moose family" }, { "index": { "_id": 
"2" } }, { "image-vector": [ 0.75, -1 ], "title": "alpine lake" }, { "index": { "_id": "3" } }, { "image-vector": [ 1.2, 0.1 ], "title": "full moon" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9501e6c8e95c21838653ea15b9b7ed5f.asciidoc000066400000000000000000000003641506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:791 [source, python] ---- resp = client.search( query={ "term": { "query.extraction_result": "failed" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/950f1230536422567f99a205ff4165ec.asciidoc000066400000000000000000000005261506101734100261270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:405 [source, python] ---- resp = client.indices.rollover( alias="my-write-alias", conditions={ "max_age": "7d", "max_docs": 1000, "max_primary_shard_size": "50gb", "max_primary_shard_docs": "2000" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/95414139c7b1203e3c2d99a354415801.asciidoc000066400000000000000000000002311506101734100260230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/recovery.asciidoc:89 [source, python] ---- resp = client.cat.recovery( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9559de0c2190f99fcc344887fc7b232a.asciidoc000066400000000000000000000012631506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:480 [source, python] ---- resp = client.indices.create( index="bicycles", mappings={ "properties": { "cycle_type": { "type": "constant_keyword", "value": "bicycle" }, "name": { "type": "text" } } }, ) print(resp) resp1 = client.indices.create( index="other_cycles", mappings={ "properties": { "cycle_type": { "type": "keyword" }, "name": { "type": "text" } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/956cb470258024af964cd2dabbaf7c7c.asciidoc000066400000000000000000000005571506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-management/migrate-index-allocation-filters.asciidoc:220 [source, python] ---- resp = client.indices.put_settings( index="my-index", settings={ "index.routing.allocation.require.data": None, "index.routing.allocation.include._tier_preference": "data_warm,data_hot" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/957d2e6ddbb9a9b16549c5e67b93b41b.asciidoc000066400000000000000000000005151506101734100266000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:267 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "content", "name" ], "query": "this AND that" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9584b042223982e0bfde8d12d42c9705.asciidoc000066400000000000000000000005611506101734100262630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/configuring-kerberos-realm.asciidoc:179 [source, python] ---- resp = client.security.put_role_mapping( name="kerbrolemapping", roles=[ "monitoring_user" ], enabled=True, rules={ "field": { "username": "user@REALM" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/95b3f53f2065737bbeba6199e8a12df3.asciidoc000066400000000000000000000004241506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-query.asciidoc:152 [source, 
python] ---- resp = client.index( index="my-index-000001", id="1", document={ "color": [ "blue", "green" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/95c03bdef4faf6bef039c986f4cb3aba.asciidoc000066400000000000000000000004701506101734100271440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/example-watches/example-watch-clusterstatus.asciidoc:259 [source, python] ---- resp = client.search( index=".watcher-history*", pretty=True, query={ "match": { "result.condition.met": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/95c1b376652533c352bbf793c74d1b08.asciidoc000066400000000000000000000004631506101734100262640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-role.asciidoc:247 [source, python] ---- resp = client.security.query_role( query={ "match": { "description": { "query": "user access" } } }, size=1, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9606c271921cb800d5ea395b16d6ceaf.asciidoc000066400000000000000000000021341506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:843 [source, python] ---- resp = client.indices.create( index="galician_example", settings={ "analysis": { "filter": { "galician_stop": { "type": "stop", "stopwords": "_galician_" }, "galician_keywords": { "type": "keyword_marker", "keywords": [ "exemplo" ] }, "galician_stemmer": { "type": "stemmer", "language": "galician" } }, "analyzer": { "rebuilt_galician": { "tokenizer": "standard", "filter": [ "lowercase", "galician_stop", "galician_keywords", "galician_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9608820dbeac261ba53fb89bb9400560.asciidoc000066400000000000000000000002701506101734100264060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:239 [source, python] ---- resp = client.security.get_api_key( owner=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/962e6187bbd71c5749376efed04b65ba.asciidoc000066400000000000000000000010771506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:142 [source, python] ---- resp = client.security.put_role( name="test_role6", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "except": [ "customer.handle" ], "grant": [ "customer.*" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/966ff3a4c5b61ed1a36d44c17ce06157.asciidoc000066400000000000000000000017561506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/normalizers.asciidoc:27 [source, python] ---- resp = client.indices.create( index="index", settings={ "analysis": { "char_filter": { "quote": { "type": "mapping", "mappings": [ "« => \"", "» => \"" ] } }, "normalizer": { "my_normalizer": { "type": "custom", "char_filter": [ "quote" ], "filter": [ "lowercase", "asciifolding" ] } } } }, mappings={ "properties": { "foo": { "type": "keyword", "normalizer": "my_normalizer" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9684e5fa8c22a07a372feb6fc1f5f7c0.asciidoc000066400000000000000000000015561506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/has-privileges.asciidoc:75 [source, python] ---- resp = client.security.has_privileges( cluster=[ "monitor", 
"manage" ], index=[ { "names": [ "suppliers", "products" ], "privileges": [ "read" ] }, { "names": [ "inventory" ], "privileges": [ "read", "write" ] } ], application=[ { "application": "inventory_manager", "privileges": [ "read", "data:write/inventory" ], "resources": [ "product/1852563" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/968fb5b92aa65af09544f7c002b0953e.asciidoc000066400000000000000000000005501506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-semantic-text.asciidoc:144 [source, python] ---- resp = client.search( index="semantic-embeddings", query={ "semantic": { "field": "content", "query": "How to avoid muscle soreness while running?" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/96b9289c3c4c6b135ab3386562c4ee8d.asciidoc000066400000000000000000000004111506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/troubleshooting-shards-capacity.asciidoc:174 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.max_shards_per_node": 1200 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/96e137e42d12c180e2c702db30714a9e.asciidoc000066400000000000000000000004621506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/text.asciidoc:39 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "full_name": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/96e88611f99e6834bd64b58dc8a282c1.asciidoc000066400000000000000000000006001506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/semantic-text.asciidoc:42 [source, python] ---- resp = client.indices.create( index="my-index-000002", mappings={ "properties": { "inference_field": { "type": "semantic_text", "inference_id": "my-openai-endpoint" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/96ea0e80323d6d2d99964625c004a44d.asciidoc000066400000000000000000000004331506101734100262620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:394 [source, python] ---- resp = client.indices.put_data_lifecycle( name="dsl-data-stream", data_retention="7d", enabled=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/971c7a36ee79f2b3aa82c64ea338de70.asciidoc000066400000000000000000000005261506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:345 [source, python] ---- resp = client.indices.create( index="index", mappings={ "properties": { "foo": { "type": "keyword", "eager_global_ordinals": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/971fd23adb81bb5842c7750e0379336a.asciidoc000066400000000000000000000011741506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:764 [source, python] ---- resp = client.search( index="movies", retriever={ "text_similarity_reranker": { "retriever": { "standard": { "query": { "match": { "genre": "drama" } } } }, "field": "plot", "inference_id": "my-msmarco-minilm-model", "inference_text": "films that explore psychological depths" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/973a3ff47fc4ce036ecd9bd363fef9f7.asciidoc000066400000000000000000000006311506101734100270300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:849 [source, python] ---- resp = client.reindex( source={ "index": "metricbeat-*" }, dest={ "index": "metricbeat" }, script={ "lang": "painless", "source": "ctx._index = 'metricbeat-' + (ctx._index.substring('metricbeat-'.length(), ctx._index.length())) + '-1'" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/975b4b92464d52068516aa2f0f955cc1.asciidoc000066400000000000000000000002571506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/segments.asciidoc:125 [source, python] ---- resp = client.indices.segments( index="test1,test2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/976e5f9baf81bd6ca0e9f80916a0a4f9.asciidoc000066400000000000000000000010571506101734100266630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:18 [source, python] ---- resp = client.security.put_role( name="test_role1", indices=[ { "names": [ "events-*" ], "privileges": [ "read" ], "field_security": { "grant": [ "category", "@timestamp", "message" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97916243f245478b735471a9e37f33d1.asciidoc000066400000000000000000000010361506101734100260620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/iprange-aggregation.asciidoc:12 [source, python] ---- resp = client.search( index="ip_addresses", size=10, aggs={ "ip_ranges": { "ip_range": { "field": "ip", "ranges": [ { "to": "10.0.0.5" }, { "from": "10.0.0.5" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97a3216af3d4b4d805d467d9c715cb3e.asciidoc000066400000000000000000000003141506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/get-desired-balance.asciidoc:27 [source, python] ---- resp = client.perform_request( "GET", "/_internal/desired_balance", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97ae2b62aa372a955278be6f660356ba.asciidoc000066400000000000000000000005321506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/combined-fields-query.asciidoc:57 [source, python] ---- resp = client.search( query={ "combined_fields": { "query": "distributed consensus", "fields": [ "title^2", "body" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97babc8d19ef0866774576716eb6d19e.asciidoc000066400000000000000000000005671506101734100264650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:781 [source, python] ---- resp = client.update_by_query( index="test", refresh=True, conflicts="proceed", ) print(resp) resp1 = client.search( index="test", filter_path="hits.total", query={ "match": { "flag": "foo" } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/97c6c07f46f4177f0565a04bc50924a3.asciidoc000066400000000000000000000021601506101734100262610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:113 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "query_string": { "query": "(information retrieval) OR (artificial intelligence)", 
"default_field": "text" } } } }, { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } } ], "rank_window_size": 10, "rank_constant": 1 } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97da68c09c9f1a97a21780fd404e213a.asciidoc000066400000000000000000000006371506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/ipprefix-aggregation.asciidoc:279 [source, python] ---- resp = client.search( index="network-traffic", size=0, aggs={ "ipv4-subnets": { "ip_prefix": { "field": "ipv4", "prefix_length": 24, "append_prefix_length": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97ea5ab17213cb1faaf6f3ea13607098.asciidoc000066400000000000000000000002271506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/start.asciidoc:49 [source, python] ---- resp = client.watcher.start() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/97f5df84efec655f479fad78bc392d4d.asciidoc000066400000000000000000000014421506101734100267660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/profile.asciidoc:835 [source, python] ---- resp = client.search( index="my-index-000001", profile=True, query={ "term": { "user.id": { "value": "elkbee" } } }, aggs={ "my_scoped_agg": { "terms": { "field": "http.response.status_code" } }, "my_global_agg": { "global": {}, "aggs": { "my_level_agg": { "terms": { "field": "http.response.status_code" } } } } }, post_filter={ "match": { "message": "search" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/983fbb78e57e8fe98db38cf2d217e943.asciidoc000066400000000000000000000021731506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-inner-hits.asciidoc:212 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "comments": { "type": "nested" } } }, ) print(resp) resp1 = client.index( index="test", id="1", refresh=True, document={ "title": "Test title", "comments": [ { "author": "kimchy", "text": "comment text" }, { "author": "nik9000", "text": "words words words" } ] }, ) print(resp1) resp2 = client.search( index="test", query={ "nested": { "path": "comments", "query": { "match": { "comments.text": "words" } }, "inner_hits": { "_source": False, "docvalue_fields": [ "comments.text.keyword" ] } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/9851f5225150bc032fb3b195cd447f4f.asciidoc000066400000000000000000000016241506101734100263360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:213 [source, python] ---- resp = client.bulk( index="byte-image-index", refresh=True, operations=[ { "index": { "_id": "1" } }, { "byte-image-vector": [ 5, -20 ], "title": "moose family" }, { "index": { "_id": "2" } }, { "byte-image-vector": [ 8, -15 ], "title": "alpine lake" }, { "index": { "_id": "3" } }, { "byte-image-vector": [ 11, 23 ], "title": "full moon" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/98574a419b6be603a0af8f7f22a92d23.asciidoc000066400000000000000000000002401506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/grok.asciidoc:258 [source, python] ---- resp = client.ingest.processor_grok() print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/98621bea4765b1b838cc9daa914bf5c5.asciidoc000066400000000000000000000005531506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:340 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n sequence with maxspan=1h\n [ process where process.name == \"regsvr32.exe\" ] by process.pid\n [ file where stringContains(file.name, \"scrobj.dll\") ] by process.pid\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/986f892bfa4dfdf1da8455fdf84a4b0c.asciidoc000066400000000000000000000010771506101734100270260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-alibabacloud-ai-search.asciidoc:228 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="alibabacloud_ai_search_embeddings", inference_config={ "service": "alibabacloud-ai-search", "service_settings": { "api_key": "", "service_id": "ops-text-embedding-001", "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", "workspace": "default" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/98855f4bda8726d5d123aeebf7869e47.asciidoc000066400000000000000000000002331506101734100265330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/nodeattrs.asciidoc:88 [source, python] ---- resp = client.cat.nodeattrs( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9887f65af249bbf09190b1153ea2597b.asciidoc000066400000000000000000000003531506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:615 [source, python] ---- resp = client.sql.get_async_status( id="FnR0TDhyWUVmUmVtWXRWZER4MXZiNFEad2F5UDk2ZVdTVHV1S0xDUy00SklUdzozMTU=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/98b403c356a9b14544e9b9f646845e9f.asciidoc000066400000000000000000000007761506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:848 [source, python] ---- resp = client.put_script( id="my-search-template", script={ "lang": "mustache", "source": { "query": { "multi_match": { "query": "{{query_string}}", "fields": "[{{#text_fields}}{{user_name}}{{^last}},{{/last}}{{/text_fields}}]" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/98c1080d8630d3a18d564312300d020f.asciidoc000066400000000000000000000012321506101734100260670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/network-direction.asciidoc:66 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "network_direction": { "internal_networks": [ "private" ] } } ] }, docs=[ { "_source": { "source": { "ip": "128.232.110.120" }, "destination": { "ip": "192.168.1.1" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/98f43710cedd28a464e8abf4b09bcc9a.asciidoc000066400000000000000000000006751506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations.asciidoc:95 [source, python] ---- resp = client.search( index="my-index-000001", query={ "range": { "@timestamp": { "gte": "now-1d/d", "lt": "now/d" } } }, aggs={ "my-agg-name": { "terms": { "field": "my-field" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/98f7525ec0bc8945eafa008a5a9c50c0.asciidoc000066400000000000000000000004131506101734100265550ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // eql/eql.asciidoc:1253 [source, python] ---- resp = client.eql.search( index="my-data-stream", wait_for_completion_timeout="2s", query="\n process where process.name == \"cmd.exe\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/990c0d794ed6f05d1620b5d49f7aff6e.asciidoc000066400000000000000000000003471506101734100266030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc:183 [source, python] ---- resp = client.indices.get_data_lifecycle( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/99160b7c3c3fc1fac98aeb426dbcb3cb.asciidoc000066400000000000000000000015171506101734100270540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/fields.asciidoc:244 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "first_name": { "type": "text" }, "last_name": { "type": "text" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", refresh=True, document={ "first_name": "Barry", "last_name": "White" }, ) print(resp1) resp2 = client.search( index="my-index-000001", script_fields={ "full_name": { "script": { "lang": "painless", "source": "params._source.first_name + ' ' + params._source.last_name" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/991b9ba53f0eccec8ec5a42f8d9b655c.asciidoc000066400000000000000000000006201506101734100270120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:628 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "fields": { "body": {}, "blog.title": { "number_of_fragments": 0 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/99474a7e7979816c874aeac4403be5d0.asciidoc000066400000000000000000000011071506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/rate-aggregation.asciidoc:104 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "by_date": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "avg_price": { "rate": { "field": "price", "unit": "day" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/996521cef7803ef363a49ac6321ea1de.asciidoc000066400000000000000000000006201506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:256 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n sequence with maxspan=1d\n [ process where process.name == \"cmd.exe\" ]\n ![ process where stringContains(process.command_line, \"ocx\") ]\n [ file where stringContains(file.name, \"scrobj.dll\") ]\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/996f320a0f537c24b9cd0d71b5f7c1f8.asciidoc000066400000000000000000000011721506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:175 [source, python] ---- resp = client.search( query={ "function_score": { "query": { "match": { "message": "elasticsearch" } }, "script_score": { "script": { "params": { "a": 5, "b": 1.2 }, "source": "params.a / Math.pow(params.b, doc['my-int'].value)" } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/99803d7b111b862c0c82e9908e549b16.asciidoc000066400000000000000000000006251506101734100262170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-mistral.asciidoc:113 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="mistral-embeddings-test", inference_config={ "service": "mistral", "service_settings": { "api_key": "", "model": "mistral-embed" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/998651b98e152add530084a631a4ab5a.asciidoc000066400000000000000000000003731506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:528 [source, python] ---- resp = client.cluster.put_settings( persistent={ "indices.lifecycle.poll_interval": "1m" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/998c8479c8704bca0e121d5969859517.asciidoc000066400000000000000000000002641506101734100261620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:417 [source, python] ---- resp = client.count( index="music", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/99a56f423df3a0e57b7f20146f0d33b5.asciidoc000066400000000000000000000006051506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/match-only-text.asciidoc:26 [source, python] ---- resp = client.indices.create( index="logs", mappings={ "properties": { "@timestamp": { "type": "date" }, "message": { "type": "match_only_text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/99b617a0a83fcfbe5755ccc724a4ce62.asciidoc000066400000000000000000000006151506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:118 [source, python] ---- resp = client.index( index="place_path_category", id="1", document={ "suggest": [ "timmy's", "starbucks", "dunkin donuts" ], "cat": [ "cafe", "food" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/99c1cfe60f3ccf5bf3abd24c31ed9034.asciidoc000066400000000000000000000007311506101734100267730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc:20 [source, python] ---- resp = client.ccr.put_auto_follow_pattern( name="", remote_cluster="", leader_index_patterns=[ "" ], leader_index_exclusion_patterns=[ "" ], follow_index_pattern="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/99fb82d49ac477e6a9dfdd71f9465374.asciidoc000066400000000000000000000003341506101734100265440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/delete-ip-location-database.asciidoc:58 [source, python] ---- resp = client.ingest.delete_ip_location_database( id="example-database-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9a02bd47c000a3d9a8911233c37c890f.asciidoc000066400000000000000000000012721506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:367 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "date": "2015-10-01T00:30:00Z" }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "date": "2015-10-01T01:30:00Z" }, ) print(resp1) resp2 = client.search( 
index="my-index-000001", size="0", aggs={ "by_day": { "date_histogram": { "field": "date", "calendar_interval": "day" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/9a036a792be1d39af9fd0d1adb5f3402.asciidoc000066400000000000000000000006651506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keep-words-tokenfilter.asciidoc:26 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "keep", "keep_words": [ "dog", "elephant", "fox" ] } ], text="the quick fox jumps over the lazy dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9a05cc10eea1251e23b82a4549913536.asciidoc000066400000000000000000000004001506101734100262320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:108 [source, python] ---- resp = client.cat.allocation( v=True, s="node", h="node,shards,disk.percent,disk.indices,disk.used", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9a09d33ec11e20b6081cae882282ca60.asciidoc000066400000000000000000000003221506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-privileges-cache.asciidoc:63 [source, python] ---- resp = client.security.clear_cached_privileges( application="*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9a203aae3e1412d919546276fb52a5ca.asciidoc000066400000000000000000000007011506101734100264000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-cohere.asciidoc:196 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="cohere-embeddings", inference_config={ "service": "cohere", "service_settings": { "api_key": "", "model_id": "embed-english-light-v3.0", "embedding_type": "byte" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9a49b7572d571e00e20dbebdd30f9368.asciidoc000066400000000000000000000023311506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-vector-tile-api.asciidoc:119 [source, python] ---- resp = client.search( index="my-index", size=10000, query={ "geo_bounding_box": { "my-geo-field": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, aggregations={ "grid": { "geotile_grid": { "field": "my-geo-field", "precision": 11, "size": 65536, "bounds": { "top_left": { "lat": -40.979898069620134, "lon": -45 }, "bottom_right": { "lat": -66.51326044311186, "lon": 0 } } } }, "bounds": { "geo_bounds": { "field": "my-geo-field", "wrap_longitude": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9a4d5e41c52c20635d1fd9c6e13f6c7a.asciidoc000066400000000000000000000006451506101734100265620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:833 [source, python] ---- resp = client.index( index="metricbeat-2016.05.30", id="1", refresh=True, document={ "system.cpu.idle.pct": 0.908 }, ) print(resp) resp1 = client.index( index="metricbeat-2016.05.31", id="1", refresh=True, document={ "system.cpu.idle.pct": 0.105 }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/9a743b6575c6fe5acdf46024a7fda8a1.asciidoc000066400000000000000000000005511506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:542 [source, python] ---- resp = 
client.search( index="my_test_scores_2", query={ "term": { "grad_year": "2099" } }, sort=[ { "total_score": { "order": "desc" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ab351893dae65ec97fd8cb6832950fb.asciidoc000066400000000000000000000016421506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:1288 [source, python] ---- resp = client.search( index="product-index", query={ "script_score": { "query": { "bool": { "filter": { "range": { "price": { "gte": 1000 } } } } }, "script": { "source": "cosineSimilarity(params.queryVector, 'product-vector') + 1.0", "params": { "queryVector": [ -0.5, 90, -10, 14.8, -156 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ad14a9d7bf2699e2d86b6a607d410c0.asciidoc000066400000000000000000000003341506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:112 [source, python] ---- resp = client.search_application.get( name="my_search_application", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ad38ab4d9c3983e97e8c38fec611f10.asciidoc000066400000000000000000000006061506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/getting-started.asciidoc:107 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "leader": { "seeds": [ "127.0.0.1:9300" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ae268058c0ea32ef8926568e011c728.asciidoc000066400000000000000000000006411506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-features-api.asciidoc:129 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/my-connector/_features", headers={"Content-Type": "application/json"}, body={ "features": { "document_level_security": { "enabled": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9aedc45f83e022732789e8d796f5a43c.asciidoc000066400000000000000000000010121506101734100264420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/reroute.asciidoc:200 [source, python] ---- resp = client.cluster.reroute( commands=[ { "move": { "index": "test", "shard": 0, "from_node": "node1", "to_node": "node2" } }, { "allocate_replica": { "index": "test", "shard": 1, "node": "node3" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9af44592fb2e78fb17ad3e834bbef7a7.asciidoc000066400000000000000000000002361506101734100267370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/geoip-stats.asciidoc:17 [source, python] ---- resp = client.ingest.geo_ip_stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9afa0844883b7471883aa378a8dd10b4.asciidoc000066400000000000000000000021021506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // behavioral-analytics/apis/post-analytics-collection-event.asciidoc:75 [source, python] ---- resp = client.search_application.post_behavioral_analytics_event( collection_name="my_analytics_collection", event_type="search_click", payload={ "session": { "id": "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9" }, "user": { "id": "5f26f01a-bbee-4202-9298-81261067abbd" }, "search": { "query": "search term", "results": { "items": [ { "document": { "id": "123", "index": "products" } } ], "total_results": 10 }, "sort": { 
"name": "relevance" }, "search_application": "website" }, "document": { "id": "123", "index": "products" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9b0f34d122a4b348dc86df7410d6ebb6.asciidoc000066400000000000000000000003711506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/cancel-connector-sync-job-api.asciidoc:57 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/_sync_job/my-connector-sync-job-id/_cancel", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9b30a69fec54cf01f7af1b04a6e15239.asciidoc000066400000000000000000000002241506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/get-ccr-stats.asciidoc:109 [source, python] ---- resp = client.ccr.stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9b345e0bfd45f3a37194585ec9193478.asciidoc000066400000000000000000000002671506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/forcemerge.asciidoc:179 [source, python] ---- resp = client.indices.forcemerge( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9b68748c061b768c0153c1f2508ce207.asciidoc000066400000000000000000000011561506101734100262040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:49 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "clusterA": { "mode": "proxy", "skip_unavailable": "true", "server_name": "clustera.es.region-a.gcp.elastic-cloud.com", "proxy_socket_connections": "18", "proxy_address": "clustera.es.region-a.gcp.elastic-cloud.com:9400" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9b92266d87170e93a84f9700596d9035.asciidoc000066400000000000000000000011471506101734100260750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-shape-query.asciidoc:30 [source, python] ---- resp = client.indices.create( index="example", mappings={ "properties": { "location": { "type": "geo_shape" } } }, ) print(resp) resp1 = client.index( index="example", refresh=True, document={ "name": "Wind & Wetter, Berlin, Germany", "location": { "type": "point", "coordinates": [ 13.400544, 52.530286 ] } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/9ba6f1e64c1dfff5aac26eaa1d093f48.asciidoc000066400000000000000000000015261506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stemmer-override-tokenfilter.asciidoc:57 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "lowercase", "custom_stems", "porter_stem" ] } }, "filter": { "custom_stems": { "type": "stemmer_override", "rules": [ "running, runs => run", "stemmer => stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ba868784f417a8d3679b3c8ed5939ad.asciidoc000066400000000000000000000006351506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:176 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_size": "100gb" } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/9bae72e974bdeb56007d9104e73eff92.asciidoc000066400000000000000000000003231506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:188 [source, python] ---- resp = client.update( index="test", id="1", script="ctx._source.remove('new_field')", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9bb24fe09e3d1c73a71d00b994ba8cfb.asciidoc000066400000000000000000000002421506101734100267100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/shards.asciidoc:352 [source, python] ---- resp = client.cat.shards( index="my-index-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9bd5a470ee6d2b4a1f5280adc39675d2.asciidoc000066400000000000000000000014051506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-mysql.asciidoc:503 [source, python] ---- resp = client.update( index=".elastic-connectors", id="connector_id", doc={ "configuration": { "tables": { "type": "list", "value": "*" }, "ssl_enabled": { "type": "bool", "value": False }, "ssl_ca": { "type": "str", "value": "" }, "fetch_size": { "type": "int", "value": 50 }, "retry_count": { "type": "int", "value": 3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9beb260834f8cfb240f6308950dbb9c2.asciidoc000066400000000000000000000006631506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:523 [source, python] ---- resp = client.search( sort=[ { "_geo_distance": { "pin.location": "drm3btev3e86", "order": "asc", "unit": "km" } } ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9bfdda207b701028a3439e495e800c02.asciidoc000066400000000000000000000006441506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:288 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "date_histogram": { "field": "date", "calendar_interval": "1M", "format": "yyyy-MM-dd" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9c01db07c9ac395b6370e3b33965c21f.asciidoc000066400000000000000000000007321506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/oidc-authenticate-api.asciidoc:74 [source, python] ---- resp = client.security.oidc_authenticate( redirect_uri="https://oidc-kibana.elastic.co:5603/api/security/oidc/callback?code=jtI3Ntt8v3_XvcLzCFGq&state=4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", state="4dbrihtIAt3wBTwo6DxK-vdk-sSyDBV8Yf0AjdkdT5I", nonce="WaBPH0KqPVdG5HHdSxPRjfoZbXMCicm5v1OiAj0DUFM", realm="oidc1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9c021836acf7c0370e289f611325868d.asciidoc000066400000000000000000000006531506101734100262120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-configuration-api.asciidoc:315 [source, python] ---- resp = client.connector.update_configuration( connector_id="my-spo-connector", values={ "tenant_id": "my-tenant-id", "tenant_name": "my-sharepoint-site", "client_id": "foo", "secret_value": "bar", "site_collections": "*" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/9c2ce0132e4527077443f007d27b1158.asciidoc000066400000000000000000000012261506101734100261060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/flattened.asciidoc:422 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "flattened": { "type": "flattened" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "flattened": { "field": [ "foo" ] } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/9c4ac64e73141f6cbf2fb6da0743d9b7.asciidoc000066400000000000000000000005241506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/specify-analyzer.asciidoc:130 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": { "query": "Quick foxes", "analyzer": "stop" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9c5cbbdbe0075ab9c2611627fe4748fb.asciidoc000066400000000000000000000010001506101734100266260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/decimal-digit-tokenfilter.asciidoc:75 [source, python] ---- resp = client.indices.create( index="decimal_digit_example", settings={ "analysis": { "analyzer": { "whitespace_decimal_digit": { "tokenizer": "whitespace", "filter": [ "decimal_digit" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9c6ea5fe2339d6c7e5e4bf1b98990248.asciidoc000066400000000000000000000006451506101734100265370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:132 [source, python] ---- resp = client.search( index="image-index", knn={ "field": "image-vector", "query_vector": [ -5, 9, -12 ], "k": 10, "num_candidates": 100 }, fields=[ "title", "file-type" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9c7c8051592b6af3adb5d7c490849068.asciidoc000066400000000000000000000007121506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/put-datafeed.asciidoc:168 [source, python] ---- resp = client.ml.put_datafeed( datafeed_id="datafeed-test-job", pretty=True, indices=[ "kibana_sample_data_logs" ], query={ "bool": { "must": [ { "match_all": {} } ] } }, job_id="test-job", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9cb150d67dfa0947f29aa809bcc93c6e.asciidoc000066400000000000000000000004001506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // datatiers.asciidoc:240 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", filter_path="*.settings.index.routing.allocation.include._tier_preference", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9cbb097e5498a9fde39e3b1d3b62a4d2.asciidoc000066400000000000000000000010361506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:1052 [source, python] ---- resp = client.ml.infer_trained_model( model_id="model2", docs=[ { "text_field": "This is a very happy person" } ], inference_config={ "zero_shot_classification": { "labels": [ "glad", "sad", "bad", "rad" ], "multi_label": False } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/9cc64ab2f60f995f5dbfaca67aa6dd41.asciidoc000066400000000000000000000004601506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-query-api.asciidoc:16 [source, python] ---- resp = client.esql.query( query="\n FROM library\n | EVAL year = DATE_TRUNC(1 YEARS, release_date)\n | STATS MAX(page_count) BY year\n | SORT year\n | LIMIT 5\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9cc952d4a03264b700136cbc45abc8c6.asciidoc000066400000000000000000000012431506101734100264600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/rank-vectors.asciidoc:42 [source, python] ---- resp = client.indices.create( index="my-rank-vectors-byte", mappings={ "properties": { "my_vector": { "type": "rank_vectors", "element_type": "byte" } } }, ) print(resp) resp1 = client.index( index="my-rank-vectors-byte", id="1", document={ "my_vector": [ [ 1, 2, 3 ], [ 4, 5, 6 ] ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/9cd37d0ccbc66ad47ddb626564b27cc8.asciidoc000066400000000000000000000021051506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/execute-watch.asciidoc:333 [source, python] ---- resp = client.watcher.execute_watch( watch={ "trigger": { "schedule": { "interval": "10s" } }, "input": { "search": { "request": { "indices": [ "logs" ], "body": { "query": { "match": { "message": "error" } } } } } }, "condition": { "compare": { "ctx.payload.hits.total": { "gt": 0 } } }, "actions": { "log_error": { "logging": { "text": "Found {{ctx.payload.hits.total}} errors in the logs" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9cf6c7012a4f2bb562bc256aa28c3409.asciidoc000066400000000000000000000003661506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/execute-watch.asciidoc:320 [source, python] ---- resp = client.watcher.execute_watch( id="my_watch", action_modes={ "_all": "force_execute" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9cfbc41bb7b6fbdb26550dd2789c274e.asciidoc000066400000000000000000000005311506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:521 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", refresh=True, slices="5", query={ "range": { "http.response.bytes": { "lt": 2000000 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d1fb129ac783355a20097effded1845.asciidoc000066400000000000000000000015631506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:12 [source, python] ---- resp = client.bulk( index="test", refresh=True, operations=[ { "index": {} }, { "s": 1, "m": 3.1415 }, { "index": {} }, { "s": 2, "m": 1 }, { "index": {} }, { "s": 3, "m": 2.71828 } ], ) print(resp) resp1 = client.search( index="test", filter_path="aggregations", aggs={ "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "s": "desc" } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/9d31c7eaf8c6b56cee2fdfdde8a442bb.asciidoc000066400000000000000000000006461506101734100272340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-shrink.asciidoc:90 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { 
"shrink": { "max_primary_shard_size": "50gb" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d396afad93782699d7a929578c85284.asciidoc000066400000000000000000000010411506101734100262540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:192 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="google_vertex_ai_embeddings", inference_config={ "service": "googlevertexai", "service_settings": { "service_account_json": "", "model_id": "text-embedding-004", "location": "", "project_id": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d461ae140ddc018efd2650559800cd1.asciidoc000066400000000000000000000010211506101734100263760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-allocate.asciidoc:147 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "allocate": { "number_of_replicas": 1, "require": { "box_type": "cold" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d5855075e7008270459cc88c189043d.asciidoc000066400000000000000000000004661506101734100260710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:112 [source, python] ---- resp = client.security.put_user( username="cross-cluster-user", password="l0ng-r4nd0m-p@ssw0rd", roles=[ "remote-replication" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d662fc9f943c287b7144f5e4e2ae358.asciidoc000066400000000000000000000006211506101734100264470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/median-absolute-deviation-aggregation.asciidoc:90 [source, python] ---- resp = client.search( index="reviews", size=0, aggs={ "review_variability": { "median_absolute_deviation": { "field": "rating", "compression": 100 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d66cb59711f24e6b4ff85608c9b5a1b.asciidoc000066400000000000000000000003431506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/task-queue-backlog.asciidoc:73 [source, python] ---- resp = client.tasks.list( pretty=True, human=True, detailed=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d67db8370a98854812d38ae73ee2a12.asciidoc000066400000000000000000000012151506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:302 [source, python] ---- resp = client.search( index="index2", query={ "query_string": { "query": "running with scissors", "fields": [ "comment", "comment.english" ] } }, highlight={ "order": "score", "fields": { "comment": { "type": "fvh", "matched_fields": [ "comment", "comment.english" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d79645ab3a9da3f63c54a1516214a5a.asciidoc000066400000000000000000000002171506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // health/health.asciidoc:471 [source, python] ---- resp = client.health_report() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9d9c8d715b72ce336e604c2c8a2b540e.asciidoc000066400000000000000000000017071506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
aggregations/pipeline/bucket-sort-aggregation.asciidoc:54 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "total_sales": { "sum": { "field": "price" } }, "sales_bucket_sort": { "bucket_sort": { "sort": [ { "total_sales": { "order": "desc" } } ], "size": 3 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9de10a59a5f56dd0906be627896cc789.asciidoc000066400000000000000000000004051506101734100264500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:543 [source, python] ---- resp = client.search( index="bicycles,other_cycles", query={ "match": { "description": "dutch" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9de4704d2f047dae1259249112488697.asciidoc000066400000000000000000000004711506101734100261400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-azure.asciidoc:72 [source, python] ---- resp = client.snapshot.create_repository( name="my_backup", repository={ "type": "azure", "settings": { "client": "secondary" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9de4ea9d5f3d427a71ee07d998cb5611.asciidoc000066400000000000000000000003131506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/blocks.asciidoc:138 [source, python] ---- resp = client.indices.add_block( index="my-index-000001", block="write", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9de4edafd22a8b9cb557632b2c8779cd.asciidoc000066400000000000000000000006511506101734100267400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:309 [source, python] ---- resp = client.esql.query( query="\n FROM library\n | EVAL year = DATE_EXTRACT(\"year\", release_date)\n | WHERE page_count > ?1 AND author == ?2\n | STATS count = COUNT(*) by year\n | WHERE count > ?3\n | LIMIT 5\n ", params=[ 300, "Frank Herbert", 0 ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9e0e3ce27967f164f4585c5231ba9c75.asciidoc000066400000000000000000000004371506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/search-as-you-type.asciidoc:71 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "my_field": "quick brown fox jump lazy dog" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9e3c28d5820c38ea117eb2e9a5061089.asciidoc000066400000000000000000000005451506101734100263450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rank-feature-query.asciidoc:321 [source, python] ---- resp = client.search( index="test", query={ "rank_feature": { "field": "pagerank", "sigmoid": { "pivot": 7, "exponent": 0.6 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9e563b8d5a7845f644db8d5bbf453eb6.asciidoc000066400000000000000000000007041506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/put-synonyms-set.asciidoc:67 [source, python] ---- resp = client.synonyms.put_synonym( id="my-synonyms-set", synonyms_set=[ { "id": "test-1", "synonyms": "hello, hi" }, { "synonyms": "bye, goodbye" }, { "id": "test-2", "synonyms": "test => check" } ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/9e5ae957fd0663662bfbed9d1effe99e.asciidoc000066400000000000000000000006441506101734100270450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:559 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "set": { "description": "Set '_routing' to 'geoip.country_iso_code' value", "field": "_routing", "value": "{{{geoip.country_iso_code}}}" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9e962baf1fb407c21d6c47dcd37cec29.asciidoc000066400000000000000000000007201506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:255 [source, python] ---- resp = client.render_search_template( source={ "query": { "match": { "message": "{{query_string}}" } }, "from": "{{from}}{{^from}}0{{/from}}", "size": "{{size}}{{^size}}10{{/size}}" }, params={ "query_string": "hello world" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9e9717d9108ae1425bfacf71c7c44539.asciidoc000066400000000000000000000003021506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat.asciidoc:127 [source, python] ---- resp = client.cat.indices( bytes="b", s="store.size:desc,index:asc", v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9eda9c39428b0c2c53cbd8ee7ae0f888.asciidoc000066400000000000000000000005421506101734100267400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:1016 [source, python] ---- resp = client.security.saml_authenticate( content="PHNhbWxwOlJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46b2FzaXM6bmFtZXM6dGM6U0FNTDoyLjA6cHJvdG9jb2wiIHhtbG5zOnNhbWw9InVybjpvYXNpczpuYW1lczp0YzpTQU1MOjIuMD.....", ids=[], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9eef31d85ebaf6c27054d7375715dbe0.asciidoc000066400000000000000000000017451506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/actions.asciidoc:228 [source, python] ---- resp = client.watcher.put_watch( id="log_event_watch", trigger={ "schedule": { "interval": "5m" } }, input={ "search": { "request": { "indices": "log-events", "body": { "query": { "match": { "status": "error" } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 0 } } }, actions={ "log_hits": { "foreach": "ctx.payload.hits.hits", "max_iterations": 500, "logging": { "text": "Found id {{ctx.payload._id}} with field {{ctx.payload._source.my_field}}" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f04cc1a0c6cdb3ed2247f1399713767.asciidoc000066400000000000000000000004631506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/keyword.asciidoc:31 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "tags": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f0a0029982d9b3423a2a3de1f1b5136.asciidoc000066400000000000000000000037531506101734100263300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cartesian-centroid-aggregation.asciidoc:13 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": 1 } }, { "location": "POINT 
(491.2350 5237.4081)", "city": "Amsterdam", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (490.1618 5236.9219)", "city": "Amsterdam", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 } }, { "location": "POINT (491.4722 5237.1667)", "city": "Amsterdam", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (440.5200 5122.2900)", "city": "Antwerp", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (233.6389 4886.1111)", "city": "Paris", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (232.7000 4886.0000)", "city": "Paris", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( index="museums", size="0", aggs={ "centroid": { "cartesian_centroid": { "field": "location" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/9f22a0920cc763eefa233ced963d9624.asciidoc000066400000000000000000000004541506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-term-query.asciidoc:34 [source, python] ---- resp = client.search( query={ "span_term": { "user.id": { "term": "kimchy", "boost": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f286416f1b18940f13cb27ab5c8458e.asciidoc000066400000000000000000000014031506101734100263460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/pattern_replace-tokenfilter.asciidoc:133 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "filter": [ "my_pattern_replace_filter" ] } }, "filter": { "my_pattern_replace_filter": { "type": "pattern_replace", "pattern": "[£|€]", "replacement": "", "all": False } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f3341489fefd38c4e439c29f6dcb86c.asciidoc000066400000000000000000000011221506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-set-query.asciidoc:224 [source, python] ---- resp = client.search( index="job-candidates", query={ "terms_set": { "programming_languages": { "terms": [ "c++", "java", "php" ], "minimum_should_match_script": { "source": "Math.min(params.num_terms, doc['required_matches'].value)" }, "boost": 1 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f66b5243050f71ed51bc787a7ac1218.asciidoc000066400000000000000000000010201506101734100263270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:215 [source, python] ---- resp = client.bulk( index="index2", refresh=True, operations=[ { "index": { "_id": "doc1" } }, { "comment": "run with scissors" }, { "index": { "_id": "doc2" } }, { "comment": "running with scissors" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f7671119236423e0e40801ef6485af1.asciidoc000066400000000000000000000004211506101734100261170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/uppercase-tokenfilter.asciidoc:30 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "uppercase" ], text="the Quick FoX JUMPs", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9f99be2d58c48a6bf8e892aa24604197.asciidoc000066400000000000000000000003611506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
ml/df-analytics/apis/update-dfanalytics.asciidoc:98 [source, python] ---- resp = client.ml.update_data_frame_analytics( id="loganalytics", model_memory_limit="200mb", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9fa55fc76ec4bd81f372e9389f1da851.asciidoc000066400000000000000000000004501506101734100266060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:318 [source, python] ---- resp = client.indices.put_settings( index="my-data-stream", settings={ "index": { "refresh_interval": "30s" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9fda516a5dc60ba477b970eaad4429db.asciidoc000066400000000000000000000003251506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/apis/get-lifecycle.asciidoc:148 [source, python] ---- resp = client.indices.get_data_lifecycle( name="my-data-stream*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9feff356f302ea4915347ab71cc4887a.asciidoc000066400000000000000000000005611506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/simulate-template.asciidoc:241 [source, python] ---- resp = client.indices.simulate_template( index_patterns=[ "my-index-*" ], composed_of=[ "ct2" ], priority=10, template={ "settings": { "index.number_of_replicas": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ff9b2a73419a6c82f17a358b4991499.asciidoc000066400000000000000000000005301506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/point-in-time-api.asciidoc:165 [source, python] ---- resp = client.close_point_in_time( id="46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/9ffe41322c095af1b6ea45a79b640a6f.asciidoc000066400000000000000000000015631506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-within-query.asciidoc:11 [source, python] ---- resp = client.search( query={ "span_within": { "little": { "span_term": { "field1": "foo" } }, "big": { "span_near": { "clauses": [ { "span_term": { "field1": "bar" } }, { "span_term": { "field1": "baz" } } ], "slop": 5, "in_order": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a00311843b5f8f3e9f7d511334a828b1.asciidoc000066400000000000000000000002651506101734100262520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-caps.asciidoc:98 [source, python] ---- resp = client.rollup.get_rollup_caps( id="sensor-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a008f42379930edc354b4074e0a33344.asciidoc000066400000000000000000000003671506101734100261700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:116 [source, python] ---- resp = client.index( index="index", id="1", document={ "designation": "spoon", "price": 13 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a01753fa7b4ba6dc19054f4f42d91cd9.asciidoc000066400000000000000000000010051506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:620 [source, python] ---- resp = client.render_search_template( source="{ \"query\": { \"bool\": { \"filter\": [ { 
\"range\": { \"@timestamp\": { \"gte\": {{#year_scope}} \"now-1y/d\" {{/year_scope}} {{^year_scope}} \"now-1d/d\" {{/year_scope}} , \"lt\": \"now/d\" }}}, { \"term\": { \"user.id\": \"{{user_id}}\" }}]}}}", params={ "year_scope": True, "user_id": "kimchy" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a037beb3d02296e1d36dd43ef5c935dd.asciidoc000066400000000000000000000005151506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keyword-repeat-tokenfilter.asciidoc:49 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "keyword_repeat" ], text="fox running and jumping", explain=True, attributes="keyword", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a0497157fdefecd04e597edb800a1a95.asciidoc000066400000000000000000000004201506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:513 [source, python] ---- resp = client.search( source="obj.*", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a04a8d90f8245ff5f30a9983909faa1d.asciidoc000066400000000000000000000022741506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:427 [source, python] ---- resp = client.indices.create( index="my_queries1", settings={ "analysis": { "analyzer": { "wildcard_prefix": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "wildcard_edge_ngram" ] } }, "filter": { "wildcard_edge_ngram": { "type": "edge_ngram", "min_gram": 1, "max_gram": 32 } } } }, mappings={ "properties": { "query": { "type": "percolator" }, "my_field": { "type": "text", "fields": { "prefix": { "type": "text", "analyzer": "wildcard_prefix", "search_analyzer": "standard" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a0871be90badeecd2f8d8ec90230e248.asciidoc000066400000000000000000000021651506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/pattern-replace-charfilter.asciidoc:104 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "char_filter": [ "my_char_filter" ], "filter": [ "lowercase" ] } }, "char_filter": { "my_char_filter": { "type": "pattern_replace", "pattern": "(?<=\\p{Lower})(?=\\p{Upper})", "replacement": " " } } } }, mappings={ "properties": { "text": { "type": "text", "analyzer": "my_analyzer" } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="The fooBarBaz method", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a0a7557bb7e2aff7918557cd648f41af.asciidoc000066400000000000000000000011361506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:127 [source, python] ---- resp = client.search( index="index", aggs={ "price_ranges": { "range": { "field": "price", "ranges": [ { "to": 10 }, { "from": 10, "to": 100 }, { "from": 100 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a0c64894f14d28b7e0c902add71d2e9a.asciidoc000066400000000000000000000003651506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:511 [source, python] ---- resp = client.cluster.put_settings( 
persistent={ "xpack.profiling.templates.enabled": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a0c868282c0514a342ad04998cdc2175.asciidoc000066400000000000000000000003721506101734100262510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:367 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", conflicts="proceed", query={ "match_all": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a0d53dcb3df938fc0a01d248571a41e4.asciidoc000066400000000000000000000015671506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:246 [source, python] ---- resp = client.search( runtime_mappings={ "price.discounted": { "type": "double", "script": "\n double price = doc['price'].value;\n if (doc['product'].value == 'mad max') {\n price *= 0.8;\n }\n emit(price);\n " } }, size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "price": { "histogram": { "interval": 5, "field": "price.discounted" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a0f4e902d18460337684d74ea932fbe9.asciidoc000066400000000000000000000003211506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:263 [source, python] ---- resp = client.update( index="test", id="1", doc={ "name": "new_name" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1070cf2f5969d42d71cda057223f152.asciidoc000066400000000000000000000002431506101734100263230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:248 [source, python] ---- resp = client.cat.shards( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1377b32d7fe3680079ae0df73009b0e.asciidoc000066400000000000000000000013741506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/tophits-aggregation.asciidoc:293 [source, python] ---- resp = client.search( index="sales", query={ "term": { "tags": "car" } }, aggs={ "by_sale": { "nested": { "path": "comments" }, "aggs": { "by_user": { "terms": { "field": "comments.username", "size": 1 }, "aggs": { "by_nested": { "top_hits": {} } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1490f71d705053951870fd2d3bceb39.asciidoc000066400000000000000000000007541506101734100263420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/enabled.asciidoc:99 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "session_data": { "type": "object", "enabled": False } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="session_1", document={ "session_data": "foo bar" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a159143bb578403bb9c7ff37d635d7ad.asciidoc000066400000000000000000000006701506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/predicate-tokenfilter.asciidoc:20 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "predicate_token_filter", "script": { "source": "\n token.term.length() > 3\n " } } ], text="the fox jumps the lazy dog", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a159e1ce0cba7a35ce44db9bebad22f3.asciidoc000066400000000000000000000002261506101734100271140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-get.asciidoc:132 [source, python] ---- resp = client.slm.get_lifecycle() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a162eb50853331c80596f5994e9d1c38.asciidoc000066400000000000000000000004411506101734100262130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:212 [source, python] ---- resp = client.search_application.render_query( name="my_search_application", params={ "query_string": "rock climbing" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a180c97f8298fb2388fdcaf7b2e1b81e.asciidoc000066400000000000000000000010631506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:440 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="nightly-snapshots", schedule="0 30 2 * * ?", name="", repository="my_repository", config={ "indices": "*", "include_global_state": True, "feature_states": [ "kibana", "security" ] }, retention={ "expire_after": "30d", "min_count": 5, "max_count": 50 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1879930c1dac36a57d7f094a680420b.asciidoc000066400000000000000000000013361506101734100263360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geohashgrid-aggregation.asciidoc:130 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "zoomed-in": { "filter": { "geo_bounding_box": { "location": { "top_left": "u17", "bottom_right": "u17" } } }, "aggregations": { "zoom1": { "geohash_grid": { "field": "location", "precision": 8 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a197076e0e74951ea88f20309ec257e2.asciidoc000066400000000000000000000015331506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/condition-tokenfilter.asciidoc:125 [source, python] ---- resp = client.indices.create( index="palindrome_list", settings={ "analysis": { "analyzer": { "whitespace_reverse_first_token": { "tokenizer": "whitespace", "filter": [ "reverse_first_token" ] } }, "filter": { "reverse_first_token": { "type": "condition", "filter": [ "reverse" ], "script": { "source": "token.getPosition() === 0" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1acf454bd6477183ce27ace872deb46.asciidoc000066400000000000000000000017351506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:169 [source, python] ---- resp = client.security.put_role( name="test_role7", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "grant": [ "a.*" ], "except": [ "a.b*" ] } } ], ) print(resp) resp1 = client.security.put_role( name="test_role8", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "grant": [ "a.b*" ], "except": [ "a.b.c*" ] } } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a1b668795243398f5bc40bcc9bead884.asciidoc000066400000000000000000000016431506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:254 [source, python] ---- resp = client.indices.create( index="idx", settings={ 
"index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "my_range": { "type": "long_range" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "my_range": [ { "gte": 200, "lte": 300 }, { "gte": 1, "lte": 100 }, { "gte": 200, "lte": 300 }, { "gte": 200, "lte": 500 } ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a1ccd51eef37e43c935a047b0ee15daa.asciidoc000066400000000000000000000003021506101734100267510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:401 [source, python] ---- resp = client.indices.rollover( alias="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1d0603b24a5b048f0959975d8057534.asciidoc000066400000000000000000000005451506101734100261230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:360 [source, python] ---- resp = client.termvectors( index="my-index-000001", doc={ "fullname": "John Doe", "text": "test test test" }, fields=[ "fullname" ], per_field_analyzer={ "fullname": "keyword" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1dcc6668d13271c8207ff5ff1d35492.asciidoc000066400000000000000000000003001506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/index-mgmt.asciidoc:215 [source, python] ---- resp = client.indices.get( index="my-index-000001,my-index-000002", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1dda7e7c01be96a4acf7b725d70385f.asciidoc000066400000000000000000000013051506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:684 [source, python] ---- resp = client.search( index="index", retriever={ "text_similarity_reranker": { "retriever": { "standard": { "query": { "match_phrase": { "text": "landmark in Paris" } } } }, "field": "text", "inference_id": "my-cohere-rerank-model", "inference_text": "Most famous landmark in Paris", "rank_window_size": 100, "min_score": 0.5 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1e5884051755b5a5f4d7549f319f4c7.asciidoc000066400000000000000000000010651506101734100263020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/nested-aggregation.asciidoc:13 [source, python] ---- resp = client.indices.create( index="products", mappings={ "properties": { "resellers": { "type": "nested", "properties": { "reseller": { "type": "keyword" }, "price": { "type": "double" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1e5f3956f9a697e79478fc9a6e30e1f.asciidoc000066400000000000000000000004241506101734100265420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/thai-tokenizer.asciidoc:20 [source, python] ---- resp = client.indices.analyze( tokenizer="thai", text="การที่ได้ต้องแสดงว่างานดี", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a1f70bc71b763b58206814c40a7440e7.asciidoc000066400000000000000000000004711506101734100262450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/update-settings.asciidoc:47 [source, python] ---- resp = client.perform_request( "PUT", "/_watcher/settings", headers={"Content-Type": "application/json"}, body={ "index.auto_expand_replicas": "0-4" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a21319c9eff1ac47d7fe7490f1ef2efa.asciidoc000066400000000000000000000004331506101734100270060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/decimal-digit-tokenfilter.asciidoc:20 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "decimal_digit" ], text="१-one two-२ ३", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a21a7bf052b41f5b996dc58f7b69770f.asciidoc000066400000000000000000000002741506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:323 [source, python] ---- resp = client.ml.set_upgrade_mode( enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a253a1712953f7292bdd646c48ec7fd2.asciidoc000066400000000000000000000003531506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:240 [source, python] ---- resp = client.search( index="my-index-000001", sort="@timestamp:desc", size="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a28111cdd9b5aaea96c779cbfbf38780.asciidoc000066400000000000000000000021001506101734100267160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:482 [source, python] ---- resp = client.indices.create( index="czech_example", settings={ "analysis": { "filter": { "czech_stop": { "type": "stop", "stopwords": "_czech_" }, "czech_keywords": { "type": "keyword_marker", "keywords": [ "příklad" ] }, "czech_stemmer": { "type": "stemmer", "language": "czech" } }, "analyzer": { "rebuilt_czech": { "tokenizer": "standard", "filter": [ "lowercase", "czech_stop", "czech_keywords", "czech_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a2abd6b6b6b6df7c574a557b5468b5e1.asciidoc000066400000000000000000000012311506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:191 [source, python] ---- resp = client.indices.create( index="index2", mappings={ "properties": { "comment": { "type": "text", "analyzer": "standard", "term_vector": "with_positions_offsets", "fields": { "english": { "type": "text", "analyzer": "english", "term_vector": "with_positions_offsets" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a2b2ce031120dac49b5120b26eea8758.asciidoc000066400000000000000000000002771506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/indices.asciidoc:119 [source, python] ---- resp = client.cat.indices( index="my-index-*", v=True, s="index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a2bab367f0e598ae27a2f4ec82e778e9.asciidoc000066400000000000000000000014451506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/migrating-to-downsampling.asciidoc:25 [source, python] ---- resp = client.rollup.put_job( id="sensor", index_pattern="sensor-*", rollup_index="sensor_rollup", cron="0 0 * * * *", page_size=1000, groups={ "date_histogram": { "field": "timestamp", "fixed_interval": "60m" }, "terms": { "fields": [ "node" ] } }, metrics=[ { "field": "temperature", "metrics": [ "min", "max", "sum" ] }, { "field": "voltage", "metrics": [ "avg" ] } ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a2bd0782aadfd0a902d7f590ee7f49fe.asciidoc000066400000000000000000000006361506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:44 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "content_embedding": { "type": "sparse_vector" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a2c3e284354e8d49cf51bb8dd5ef3613.asciidoc000066400000000000000000000002621506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/upgrade-transforms.asciidoc:103 [source, python] ---- resp = client.transform.upgrade_transforms() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a2dabdcbb661e7690166ae6d0de27e46.asciidoc000066400000000000000000000003101506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/alias.asciidoc:55 [source, python] ---- resp = client.field_caps( index="trips", fields="route_*,transit_mode", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a322c8c73d6f2f5e1e375588ed20b636.asciidoc000066400000000000000000000006571506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:149 [source, python] ---- resp = client.security.put_role( name="remote-search", indices=[ { "names": [ "target-indices" ], "privileges": [ "read", "read_cross_cluster" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a325f31e94fb1e8739258910593504a8.asciidoc000066400000000000000000000004111506101734100261260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/oidc-guide.asciidoc:610 [source, python] ---- resp = client.security.put_role( name="facilitator-role", cluster=[ "manage_oidc", "manage_token" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3464bd6f0a61623562162859566b078.asciidoc000066400000000000000000000004771506101734100260530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:75 [source, python] ---- resp = client.ccr.follow( index="kibana_sample_data_ecommerce2", wait_for_active_shards="1", remote_cluster="clusterA", leader_index="kibana_sample_data_ecommerce", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a34d70d7022eb4ba48909d440c80390f.asciidoc000066400000000000000000000004421506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // api-conventions.asciidoc:164 [source, python] ---- resp = client.search( index=",,", query={ "match": { "test": "data" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a34e758e019f563d323ca90ad9fd6e3e.asciidoc000066400000000000000000000002501506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:268 [source, python] ---- resp = client.indices.get_alias( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3646b59da66b9ab68bdbc8dc2e6a9be.asciidoc000066400000000000000000000013561506101734100270740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:159 [source, python] ---- resp = client.search( index="restaurants", retriever={ "standard": { "query": { "bool": { "should": [ { "match": { "region": 
"Austria" } } ], "filter": [ { "term": { "year": "2019" } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3779f21f132787c48681bfb50453592.asciidoc000066400000000000000000000011171506101734100261350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/ip-location.asciidoc:85 [source, python] ---- resp = client.ingest.put_pipeline( id="ip_location", description="Add ip geolocation info", processors=[ { "ip_location": { "field": "ip" } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="ip_location", document={ "ip": "89.160.20.128" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/a38f29375eabd0103f8d7c00b17bb0ab.asciidoc000066400000000000000000000002421506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/delayed.asciidoc:82 [source, python] ---- resp = client.cluster.health() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3a14f7f0e80725f695a901a7e1d579d.asciidoc000066400000000000000000000003701506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/trim-tokenfilter.asciidoc:65 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", filter=[ "trim" ], text=" fox ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3a2856ac2338a624a1fa5f31aec4db4.asciidoc000066400000000000000000000004171506101734100266120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:98 [source, python] ---- resp = client.security.create_api_key( name="my-api-key", role_descriptors={}, metadata={ "application": "myapp" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3a64d568fe93a22b042a8b31b9905b0.asciidoc000066400000000000000000000015611506101734100264050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/simulate-pipeline.asciidoc:309 [source, python] ---- resp = client.ingest.simulate( verbose=True, pipeline={ "description": "_description", "processors": [ { "set": { "field": "field2", "value": "_value2" } }, { "set": { "field": "field3", "value": "_value3" } } ] }, docs=[ { "_index": "index", "_id": "id", "_source": { "foo": "bar" } }, { "_index": "index", "_id": "id", "_source": { "foo": "rab" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3c8f474b0700711a356682f37e62b39.asciidoc000066400000000000000000000010421506101734100261730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:174 [source, python] ---- resp = client.indices.create( index="azure-ai-studio-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 1536, "element_type": "float", "similarity": "dot_product" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3ce0cfe2176f3d8a36959a5916995f0.asciidoc000066400000000000000000000002421506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:283 [source, python] ---- resp = client.tasks.list( group_by="none", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3cfd350c73a104b99a998c6be931408.asciidoc000066400000000000000000000002451506101734100264230ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // cluster/state.asciidoc:164 [source, python] ---- resp = client.cluster.state( metric="blocks", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3d13833714f9bb918e5e0f62a49bd0e.asciidoc000066400000000000000000000010751506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/iprange-aggregation.asciidoc:114 [source, python] ---- resp = client.search( index="ip_addresses", size=0, aggs={ "ip_ranges": { "ip_range": { "field": "ip", "ranges": [ { "to": "10.0.0.5" }, { "from": "10.0.0.5" } ], "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3d943ac9d45b4eff4aa0c679b4eceb3.asciidoc000066400000000000000000000003621506101734100270570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/dangling-index-import.asciidoc:19 [source, python] ---- resp = client.dangling_indices.import_dangling_index( index_uuid="", accept_data_loss=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3e79d6c626a490341c5b731acbb4a5d.asciidoc000066400000000000000000000002641506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:313 [source, python] ---- resp = client.exists_source( index="my-index-000001", id="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3f19f3787cb331f230cdac67ff578e8.asciidoc000066400000000000000000000005241506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:660 [source, python] ---- resp = client.search( aggs={ "tags": { "significant_terms": { "field": "tags", "execution_hint": "map" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3f3c1f3f31dbd225da5fd14633bc4a0.asciidoc000066400000000000000000000002761506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/geo-match-enrich-policy-type-ex.asciidoc:131 [source, python] ---- resp = client.get( index="users", id="0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3f56fa16c6cc67c2db31a4ba9ca11a7.asciidoc000066400000000000000000000005501506101734100267530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/range-enrich-policy-type-ex.asciidoc:56 [source, python] ---- resp = client.enrich.put_policy( name="networks-policy", range={ "indices": "networks", "match_field": "range", "enrich_fields": [ "name", "department" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a3f66deb467df86edbf66e1dca31da51.asciidoc000066400000000000000000000006071506101734100270650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:189 [source, python] ---- resp = client.search( index="music", source="suggest", suggest={ "song-suggest": { "prefix": "nir", "completion": { "field": "suggest", "size": 5 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a412fe22a74900c72434391ed75139dc.asciidoc000066400000000000000000000013671506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geohexgrid-aggregation.asciidoc:105 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "zoomed-in": { "filter": { "geo_bounding_box": { "location": { "top_left": "POINT (4.9 52.4)", "bottom_right": "POINT (5.0 52.3)" } } }, "aggregations": { "zoom1": { "geohex_grid": { 
"field": "location", "precision": 12 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a425fcab60f603504becee7d001f0a4b.asciidoc000066400000000000000000000003671506101734100266670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/prioritization.asciidoc:48 [source, python] ---- resp = client.indices.put_settings( index="index_4", settings={ "index.priority": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a428d518162918733d49261ffd65cfc1.asciidoc000066400000000000000000000007511506101734100262740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/unique-tokenfilter.asciidoc:95 [source, python] ---- resp = client.indices.create( index="custom_unique_example", settings={ "analysis": { "analyzer": { "standard_truncate": { "tokenizer": "standard", "filter": [ "unique" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a43954d055f042d625a905513821f5f0.asciidoc000066400000000000000000000007471506101734100261170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:824 [source, python] ---- resp = client.search_application.search( name="my_search_application", params={ "knn_field": "image-vector", "query_vector": [ -5, 9, -12 ], "k": 10, "num_candidates": 100, "fields": [ "title", "file-type" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a45244aa3adbf3c793fede100786d1f5.asciidoc000066400000000000000000000005701506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/autodatehistogram-aggregation.asciidoc:17 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "auto_date_histogram": { "field": "date", "buckets": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a45605347d6438e7aecdf3b37198616d.asciidoc000066400000000000000000000006201506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/move-to-step.asciidoc:156 [source, python] ---- resp = client.ilm.move_to_step( index="my-index-000001", current_step={ "phase": "new", "action": "complete", "name": "complete" }, next_step={ "phase": "warm", "action": "forcemerge", "name": "forcemerge" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a45810722dc4f468f81b1e8a451d21be.asciidoc000066400000000000000000000003641506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/network/tracers.asciidoc:16 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.http.HttpTracer": "TRACE" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a45d80a3fdba70c1b1ba493e51652c8a.asciidoc000066400000000000000000000007251506101734100266160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:284 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "multipoint", "coordinates": [ [ 1002, 1002 ], [ 1003, 2000 ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a45eb0cdd138d9c894ca2de9352549a1.asciidoc000066400000000000000000000011721506101734100265620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/getting-started.asciidoc:27 [source, python] ---- resp = client.watcher.put_watch( id="log_error_watch", trigger={ "schedule": { 
"interval": "10s" } }, input={ "search": { "request": { "indices": [ "logs" ], "body": { "query": { "match": { "message": "error" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a46f566ca031375658c22f89b87dc6d2.asciidoc000066400000000000000000000003461506101734100263570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:379 [source, python] ---- resp = client.cat.indices( index=".ml-anomalies-custom-example", v=True, h="index,store.size", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a49acb27f56fe799a9b1342f85cba0f3.asciidoc000066400000000000000000000007711506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc:137 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "filter": [ "word_delimiter_graph" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a4a3c3cd09efa75168dab90105afb2e9.asciidoc000066400000000000000000000003361506101734100267050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/get-inference.asciidoc:74 [source, python] ---- resp = client.inference.get( task_type="sparse_embedding", inference_id="my-elser-model", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a4bae4d956bc0a663f42cfec36bf8e0b.asciidoc000066400000000000000000000007371506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:150 [source, python] ---- resp = client.indices.create( index="index", mappings={ "properties": { "price_range": { "type": "keyword" } } }, ) print(resp) resp1 = client.index( index="index", id="1", document={ "designation": "spoon", "price": 13, "price_range": "10-100" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a4bd9bf52b4f098838d12bcb8dfc3482.asciidoc000066400000000000000000000012651506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/min-bucket-aggregation.asciidoc:42 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "min_monthly_sales": { "min_bucket": { "buckets_path": "sales_per_month>sales" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a4dbd52004f3ab1580eb73997f77dcab.asciidoc000066400000000000000000000027171506101734100266410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/ecommerce-tutorial.asciidoc:165 [source, python] ---- resp = client.transform.put_transform( transform_id="ecommerce-customer-transform", source={ "index": [ "kibana_sample_data_ecommerce" ], "query": { "bool": { "filter": { "term": { "currency": "EUR" } } } } }, pivot={ "group_by": { "customer_id": { "terms": { "field": "customer_id" } } }, "aggregations": { "total_quantity.sum": { "sum": { "field": "total_quantity" } }, "taxless_total_price.sum": { "sum": { "field": "taxless_total_price" } }, "total_quantity.max": { "max": { "field": "total_quantity" } }, "order_id.cardinality": { "cardinality": { "field": "order_id" } } } }, dest={ "index": "ecommerce-customers" }, retention_policy={ "time": { "field": "order_date", "max_age": "60d" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a4e510aa9145ccedae151c4a6634f0a4.asciidoc000066400000000000000000000004231506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stemmer-tokenfilter.asciidoc:23 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "stemmer" ], text="the foxes jumping quickly", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a4ee2214d621bcfaf768c46d21325958.asciidoc000066400000000000000000000006541506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:74 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="hugging_face_embeddings", inference_config={ "service": "hugging_face", "service_settings": { "api_key": "", "url": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a4f259522b4dc10a0323aff58236c2c2.asciidoc000066400000000000000000000005721506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:47 [source, python] ---- resp = client.index( index="music", id="1", refresh=True, document={ "suggest": { "input": [ "Nevermind", "Nirvana" ], "weight": 34 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a512e4dd8880ce0395937db1bab1d205.asciidoc000066400000000000000000000003351506101734100264650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/edgengram-tokenizer.asciidoc:28 [source, python] ---- resp = client.indices.analyze( tokenizer="edge_ngram", text="Quick Fox", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a520168c1c8b454a8f102d6a13027c73.asciidoc000066400000000000000000000003001506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/get-follow-info.asciidoc:149 [source, python] ---- resp = client.ccr.follow_info( index="follower_index", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5217a93efabceee9be19949e484f930.asciidoc000066400000000000000000000007001506101734100266570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:83 [source, python] ---- resp = client.index( index="music", id="1", refresh=True, document={ "suggest": [ { "input": "Nevermind", "weight": 10 }, { "input": "Nirvana", "weight": 3 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a547bb926c25f670078b98fbe67de3cc.asciidoc000066400000000000000000000003471506101734100266030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/delete-synonym-rule.asciidoc:108 [source, python] ---- resp = client.synonyms.delete_synonym_rule( set_id="my-synonyms-set", rule_id="test-1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a56c20a733a350673d41829c8daaafbe.asciidoc000066400000000000000000000007531506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/deciders/fixed-decider.asciidoc:37 [source, python] ---- resp = client.autoscaling.put_autoscaling_policy( name="my_autoscaling_policy", policy={ "roles": [ "data_hot" ], "deciders": { "fixed": { "storage": "1tb", "memory": "32gb", "processors": 2.3, "nodes": 8 } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a594f05459d9eecc8050c73fc8da336f.asciidoc000066400000000000000000000010131506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:129 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="azure_openai_embeddings", inference_config={ "service": "azureopenai", "service_settings": { "api_key": "", "resource_name": "", "deployment_id": "", "api_version": "2024-02-01" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5a58e8ad66afe831bc295500e3e8739.asciidoc000066400000000000000000000005331506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-unfollow.asciidoc:45 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "unfollow": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5a5fb129de2f492e8fd33043a73439c.asciidoc000066400000000000000000000014751506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/dictionary-decompounder-tokenfilter.asciidoc:152 [source, python] ---- resp = client.indices.create( index="dictionary_decompound_example", settings={ "analysis": { "analyzer": { "standard_dictionary_decompound": { "tokenizer": "standard", "filter": [ "22_char_dictionary_decompound" ] } }, "filter": { "22_char_dictionary_decompound": { "type": "dictionary_decompounder", "word_list_path": "analysis/example_word_list.txt", "max_subword_size": 22 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5aeb2c8bdf91f6146026ec8edc476b6.asciidoc000066400000000000000000000012421506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/date_nanos.asciidoc:155 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "date": { "type": "date_nanos" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "date": [ "2015-01-01T12:10:30.000Z", "2014-01-01T12:10:30.000Z" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a5b59f0170a2feaa39e40243fd7ae359.asciidoc000066400000000000000000000017161506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-client.asciidoc:196 [source, python] ---- resp = client.search_application.put( name="my-example-app", search_application={ "indices": [ "my-example-app" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"bool\": {\n \"must\": [\n {{#query}}\n {\n \"query_string\": {\n \"query\": \"{{query}}\",\n \"search_fields\": {{#toJson}}search_fields{{/toJson}}\n }\n }\n {{/query}}\n ]\n }\n }\n }\n ", "params": { "query": "", "search_fields": "" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5dfcfd1cfb3558e7912456669c92eee.asciidoc000066400000000000000000000003351506101734100266660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-prepare-authentication-api.asciidoc:85 [source, python] ---- resp = client.security.saml_prepare_authentication( realm="saml1", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a5e2b3588258430f2e595abda98e3943.asciidoc000066400000000000000000000003111506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-cache.asciidoc:60 [source, python] ---- resp = client.security.clear_cached_realms( realms="default_file", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5e6ad9e65615f6f92ae6a19674dd742.asciidoc000066400000000000000000000012101506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:595 [source, python] ---- resp = client.search( index="my-index-000001", query={ "percolate": { "field": "query", "documents": [ { "message": "Japanse art" }, { "message": "Holand culture" }, { "message": "Japanese art and Holand culture" }, { "message": "no-match" } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5e6ccfb6019238e6db602373b9af147.asciidoc000066400000000000000000000003451506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-existing-data-stream.asciidoc:19 [source, python] ---- resp = client.indices.put_data_lifecycle( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5e793d82a4455cf4105dac82a156617.asciidoc000066400000000000000000000005401506101734100263340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/validate.asciidoc:214 [source, python] ---- resp = client.indices.validate_query( index="my-index-000001", rewrite=True, query={ "more_like_this": { "like": { "_id": "2" }, "boost_terms": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5ebcd70c34d1ece77a4fb27cc050917.asciidoc000066400000000000000000000007321506101734100267070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-rank-aggregation.asciidoc:76 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_ranks": { "percentile_ranks": { "field": "load_time", "values": [ 500, 600 ], "keyed": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a5f9eb40087921e67d820775acf71522.asciidoc000066400000000000000000000004571506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:218 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "city": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a60aaed30d7d26eaacbb2c0ed4ddc66d.asciidoc000066400000000000000000000003301506101734100272410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex-cancel.asciidoc:41 [source, python] ---- resp = client.indices.cancel_migrate_reindex( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6169bc057ce8654bd306ff4b062081b.asciidoc000066400000000000000000000006201506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:279 [source, python] ---- resp = client.search( index="music", pretty=True, suggest={ "song-suggest": { "prefix": "nor", "completion": { "field": "suggest", "skip_duplicates": True } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a6204edaa0bcf7b82a89ab4f6bda0914.asciidoc000066400000000000000000000003241506101734100267550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/open-job.asciidoc:74 [source, python] ---- resp = client.ml.open_job( job_id="low_request_rate", timeout="35m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a62833baf15f2c9ac094a9289e56a012.asciidoc000066400000000000000000000004051506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-tutorial.asciidoc:166 [source, python] ---- resp = client.index( index="timeseries", document={ "message": "logged the request", "@timestamp": "1591890611" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a63e0d0504e0c9313814b7f4e2641353.asciidoc000066400000000000000000000040661506101734100261650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/range-aggregation.asciidoc:340 [source, python] ---- resp = client.indices.create( index="metrics_index", mappings={ "properties": { "network": { "properties": { "name": { "type": "keyword" } } }, "latency_histo": { "type": "histogram" } } }, ) print(resp) resp1 = client.index( index="metrics_index", id="1", refresh=True, document={ "network.name": "net-1", "latency_histo": { "values": [ 1, 3, 8, 12, 15 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp1) resp2 = client.index( index="metrics_index", id="2", refresh=True, document={ "network.name": "net-2", "latency_histo": { "values": [ 1, 6, 8, 12, 14 ], "counts": [ 8, 17, 8, 7, 6 ] } }, ) print(resp2) resp3 = client.search( index="metrics_index", size="0", filter_path="aggregations", aggs={ "latency_ranges": { "range": { "field": "latency_histo", "ranges": [ { "to": 2 }, { "from": 2, "to": 3 }, { "from": 3, "to": 10 }, { "from": 10 } ] } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/a669e9d56e34c95ef4c780e92ed307f1.asciidoc000066400000000000000000000003241506101734100265300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1425 [source, python] ---- resp = client.eql.get( id="FjlmbndxNmJjU0RPdExBTGg0elNOOEEaQk9xSjJBQzBRMldZa1VVQ2pPa01YUToxMDY=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a675fafa7c688cb3ea1be09bf887ebf0.asciidoc000066400000000000000000000004511506101734100270720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex.asciidoc:310 [source, python] ---- resp = client.indices.get( index=".migrated-ds-my-data-stream-2025.01.23-000001", human=True, filter_path="*.settings.index.version.created_string", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a692b4c0ca7825c467880b346841f5a5.asciidoc000066400000000000000000000006361506101734100262710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:162 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "name": { "properties": { "first": { "type": "text" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a699189c8d1a7573beeaea768f2fc618.asciidoc000066400000000000000000000005471506101734100266160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:436 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="snapshot-20200617", 
indices="kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", include_aliases=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a69b1ce5cc9528fb3639185eaf241ae3.asciidoc000066400000000000000000000003521506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/clear-scroll-api.asciidoc:31 [source, python] ---- resp = client.clear_scroll( scroll_id="DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6b2815d54df34b6b8d00226e9a1af0c.asciidoc000066400000000000000000000007111506101734100265360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/field-mappings.asciidoc:59 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "my_embeddings.predicted_value": { "type": "dense_vector", "dims": 384 }, "my_text_field": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6bb306ca250cf651f19cae808b97012.asciidoc000066400000000000000000000002561506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index.asciidoc:17 [source, python] ---- resp = client.indices.get( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6be6c1cb4a556866fdccb0dee2f1dea.asciidoc000066400000000000000000000003031506101734100272030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/index-template-exists-v1.asciidoc:23 [source, python] ---- resp = client.indices.exists_template( name="template_1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6ccac9f80c5e5efdaab992f3a32d919.asciidoc000066400000000000000000000003541506101734100270670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:407 [source, python] ---- resp = client.indices.get_data_stream( name="dsl-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6ef8cd8c8218d547727ffc5485bfbd7.asciidoc000066400000000000000000000012761506101734100267010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/daterange-aggregation.asciidoc:85 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "range": { "date_range": { "field": "date", "missing": "1976/11/30", "ranges": [ { "key": "Older", "to": "2016/02/01" }, { "key": "Newer", "from": "2016/02/01", "to": "now/d" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a6fdd0100cd362df54af6c95d1055c96.asciidoc000066400000000000000000000002701506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-mapping.asciidoc:17 [source, python] ---- resp = client.indices.get_mapping( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a71154ea11a5214f409ecfd118e9b5e3.asciidoc000066400000000000000000000015741506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:1049 [source, python] ---- resp = client.security.saml_invalidate( 
query="SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D", realm="saml1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a72613de3774571ba24def4b495161b5.asciidoc000066400000000000000000000004631506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:428 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "user_id": { "type": "alias", "path": "user_identifier" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a735081e715d385b4d471eea0f2b57da.asciidoc000066400000000000000000000003611506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:249 [source, python] ---- resp = client.cluster.put_settings( persistent={ "slm.retention_schedule": "0 30 1 * * ?" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a73a9a6f19516b8ead63182a9ae5b540.asciidoc000066400000000000000000000006141506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:330 [source, python] ---- resp = client.index( index="example", document={ "location": "MULTILINESTRING ((1002.0 200.0, 1003.0 200.0, 1003.0 300.0, 1002.0 300.0), (1000.0 100.0, 1001.0 100.0, 1001.0 100.0, 1000.0 100.0), (1000.2 0.2, 1000.8 100.2, 1000.8 100.8, 1000.2 100.8))" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a75765e3fb130421dde6c3c2f12e8acb.asciidoc000066400000000000000000000005451506101734100266260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/claim-connector-sync-job-api.asciidoc:69 [source, python] ---- resp = client.perform_request( "PUT", "/_connector/_sync_job/my-connector-sync-job-id/_claim", headers={"Content-Type": "application/json"}, body={ "worker_hostname": "some-machine" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a769d696bf12f5e9de4b3250646d250c.asciidoc000066400000000000000000000006251506101734100264340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:229 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "alibabacloud-ai-search-embeddings", "pipeline": "alibabacloud_ai_search_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a78dfb844d385405d4b0fb0e09b4a5a4.asciidoc000066400000000000000000000003421506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:211 [source, python] ---- resp = client.update( index="test", id="1", script="ctx._source['my-object'].remove('my-subfield')", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a799477dff04578b200788a63f9cff71.asciidoc000066400000000000000000000012251506101734100263750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/iprange-aggregation.asciidoc:162 [source, python] ---- resp = client.search( index="ip_addresses", size=0, aggs={ "ip_ranges": { "ip_range": { "field": "ip", "ranges": [ { "key": "infinity", "to": "10.0.0.5" }, { "key": "and-beyond", "from": "10.0.0.5" } ], "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a7cf31f4b907e4c00132aca75f55790c.asciidoc000066400000000000000000000002751506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/delete-pipeline.asciidoc:79 [source, python] ---- resp = client.ingest.delete_pipeline( id="pipeline-one", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a7d814caf2a995d2aeadecc3495011be.asciidoc000066400000000000000000000012261506101734100267660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/boolean.asciidoc:248 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "bool": { "type": "boolean" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "bool": [ True, False, True, False ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a7e58d4dc477a84c1306fd5749aafd8b.asciidoc000066400000000000000000000007011506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/explicit-mapping.asciidoc:20 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "age": { "type": "integer" }, "email": { "type": "keyword" }, "name": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a7fb1c0d0827d66bfa66016f2564b10c.asciidoc000066400000000000000000000004461506101734100264640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/detect-threats-with-eql.asciidoc:139 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n process where process.name == \"regsvr32.exe\" and process.command_line.keyword != null\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a8019280dab5b04211ae3b21e5e08223.asciidoc000066400000000000000000000005031506101734100262730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/register-fs-repo.asciidoc:107 [source, python] ---- resp = client.snapshot.create_repository( name="my_fs_backup", repository={ "type": "fs", "settings": { "location": "My_fs_backup_location" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a810da963d3b28d79dcd17be829bb271.asciidoc000066400000000000000000000006721506101734100265660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:620 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "user.id": "kimchy" } }, docvalue_fields=[ "user.id", "http.response.*", { "field": "date", "format": "epoch_millis" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a811b82ba4632bdd9065829085188bc9.asciidoc000066400000000000000000000003431506101734100262640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:50 [source, 
python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="my_snapshot", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a84bc239eb2f607e8bed1fdb70d63823.asciidoc000066400000000000000000000006521506101734100266430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/deciders/proactive-storage-decider.asciidoc:28 [source, python] ---- resp = client.autoscaling.put_autoscaling_policy( name="my_autoscaling_policy", policy={ "roles": [ "data_hot" ], "deciders": { "proactive_storage": { "forecast_window": "10m" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a861a89f52008610e813b9f073951c58.asciidoc000066400000000000000000000002541506101734100261300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/stats.asciidoc:135 [source, python] ---- resp = client.indices.stats( metric="merge,refresh", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a89052bcdfe40e604a98d12be6ae59d2.asciidoc000066400000000000000000000003611506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:474 [source, python] ---- resp = client.index( index="example", document={ "location": "BBOX (100.0, 102.0, 2.0, 0.0)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a8add749c3f41ad1308a45308df14103.asciidoc000066400000000000000000000013401506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/tophits-aggregation.asciidoc:277 [source, python] ---- resp = client.index( index="sales", id="1", refresh=True, document={ "tags": [ "car", "auto" ], "comments": [ { "username": "baddriver007", "comment": "This car could have better brakes" }, { "username": "dr_who", "comment": "Where's the autopilot? 
Can't find it" }, { "username": "ilovemotorbikes", "comment": "This car has two extra wheels" } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a8dff54362184b2732b9bd248cf6df8a.asciidoc000066400000000000000000000011671506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:418 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "my_range": { "type": "integer_range" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "my_range": { "lte": 2147483647 } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a9280b55a7284952f604ec7bece712f6.asciidoc000066400000000000000000000006331506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1186 [source, python] ---- resp = client.search( index="my-index-000001", query={ "range": { "voltage_corrected": { "gte": 16, "lte": 20, "boost": 1 } } }, fields=[ "voltage_corrected", "node" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a941fd568f2e20e13df909ab24506073.asciidoc000066400000000000000000000004371506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // monitoring/production.asciidoc:52 [source, python] ---- resp = client.cluster.get_settings() print(resp) resp1 = client.cluster.put_settings( persistent={ "xpack.monitoring.collection.enabled": False }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/a9541c64512ebc5fcff2dc48487dc0b7.asciidoc000066400000000000000000000004101506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:16 [source, python] ---- resp = client.esql.query( format="txt", query="FROM library | KEEP author, name, page_count, release_date | SORT page_count DESC | LIMIT 5", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9554396506888e392a1aee0ca28e6fc.asciidoc000066400000000000000000000017561506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:329 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "my-index-2099.05.06-000001", "alias": "my-alias", "filter": { "bool": { "filter": [ { "range": { "@timestamp": { "gte": "now-1d/d", "lt": "now/d" } } }, { "term": { "user.id": "kimchy" } } ] } } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a95a123b9f862e52ab1e8f875961c852.asciidoc000066400000000000000000000004651506101734100263630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-multiple-indices.asciidoc:124 [source, python] ---- resp = client.search( indices_boost=[ { "my-index-000001": 1.4 }, { "my-index-000002": 1.3 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a95ae76fca7c3e273e4bd10323b3caa6.asciidoc000066400000000000000000000010151506101734100266750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:119 [source, python] ---- resp = client.ingest.put_pipeline( id="azure_openai_embeddings_pipeline", processors=[ { "inference": { "model_id": "azure_openai_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/a960b43e720b4934edb74ab4b085ca77.asciidoc000066400000000000000000000002441506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connectors-api.asciidoc:88 [source, python] ---- resp = client.connector.list() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a97aace57c6442bbb90e1e14effbcda3.asciidoc000066400000000000000000000003611506101734100271250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:118 [source, python] ---- resp = client.sql.query( format="csv", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a97f984c01fa1d96e6d33a0e8e2cb90f.asciidoc000066400000000000000000000005771506101734100266600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:20 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "query": { "type": "percolator" }, "field": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a985e6b7b2ead9c3f30a9bc97d8b598e.asciidoc000066400000000000000000000002541506101734100267500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/field-caps.asciidoc:201 [source, python] ---- resp = client.field_caps( fields="rating,title", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a98692a565904ec0783884d81a7b71fc.asciidoc000066400000000000000000000002251506101734100263040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/health.asciidoc:87 [source, python] ---- resp = client.cat.health( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a999b5661bebb802bbbfe04faacf1971.asciidoc000066400000000000000000000004041506101734100267710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:511 [source, python] ---- resp = client.reindex( source={ "index": "my-index-2099.10.*" }, dest={ "index": "my-index-2099.10" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a99bc141066ef673e35f306157750ec9.asciidoc000066400000000000000000000004131506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/lowercase-tokenizer.asciidoc:20 [source, python] ---- resp = client.indices.analyze( tokenizer="lowercase", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a99bf70ae38bdf1c6f350140b25e0422.asciidoc000066400000000000000000000004651506101734100264650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-shard-routing.asciidoc:125 [source, python] ---- resp = client.search( index="my-index-000001", routing="my-routing-value", query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9c08023354aa9b9023807962df71d13.asciidoc000066400000000000000000000003071506101734100261710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/forcemerge.asciidoc:189 [source, python] ---- resp = client.indices.forcemerge( index="my-index-000001,my-index-000002", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9d44463dcea3cb0ea4c8f8460cea524.asciidoc000066400000000000000000000010241506101734100267040ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // aggregations/bucket/geohexgrid-aggregation.asciidoc:176 [source, python] ---- resp = client.search( index="museums", size="0", aggregations={ "tiles-in-bounds": { "geohex_grid": { "field": "location", "precision": 12, "bounds": { "top_left": "POINT (4.9 52.4)", "bottom_right": "POINT (5.0 52.3)" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9dd5cd3f2b31e7c8129ea63bab868b4.asciidoc000066400000000000000000000026231506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:656 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "index1", "index2" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"bool\": {\n \"should\": [\n {{#elser_fields}}\n {\n \"sparse_vector\": {\n \"field\": \"ml.inference.{{.}}_expanded.predicted_value\",\n \"inference_id\": \"\",\n \"query\": \"{{query_string}}\"\n }\n },\n {{/elser_fields}}\n ]\n }\n },\n \"min_score\": \"{{min_score}}\"\n }\n ", "params": { "query_string": "*", "min_score": "10", "elser_fields": [ { "name": "title" }, { "name": "description" } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9dd9595e96c307b8c798beaeb571521.asciidoc000066400000000000000000000004531506101734100265300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc:83 [source, python] ---- resp = client.ml.upgrade_job_snapshot( job_id="low_request_rate", snapshot_id="1828371", timeout="45m", wait_for_completion=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9f14efc26fdd3c37a71f06c310163d9.asciidoc000066400000000000000000000013021506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/retriever.asciidoc:650 [source, python] ---- resp = client.search( retriever={ "text_similarity_reranker": { "retriever": { "standard": { "query": { "match": { "text": "How often does the moon hide the sun?" 
} } } }, "field": "text", "inference_id": "my-elastic-rerank", "inference_text": "How often does the moon hide the sun?", "rank_window_size": 100, "min_score": 0.5 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/a9fe70387d9c96a07830e1859c57efbb.asciidoc000066400000000000000000000004121506101734100265250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:154 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "number_of_shards": 3, "number_of_replicas": 2 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa1771b702f4b771491ba4ab743a9197.asciidoc000066400000000000000000000004031506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/increase-tier-capacity.asciidoc:245 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", name="index.number_of_replicas", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa3284717241ed79d3d1d3bdbbdce598.asciidoc000066400000000000000000000004211506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/lowercase-tokenfilter.asciidoc:20 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "lowercase" ], text="THE Quick FoX JUMPs", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa5c0fa51a3553ce7caa763c3832120d.asciidoc000066400000000000000000000007451506101734100265320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:603 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="monthly-snapshots", name="", schedule="0 56 23 1 * ?", repository="my_repository", config={ "indices": "*", "include_global_state": True }, retention={ "expire_after": "366d", "min_count": 1, "max_count": 12 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa5fbb68d3a8e0d0c894791cb6cf0b13.asciidoc000066400000000000000000000007501506101734100267130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/reverse-tokenfilter.asciidoc:79 [source, python] ---- resp = client.indices.create( index="reverse_example", settings={ "analysis": { "analyzer": { "whitespace_reverse": { "tokenizer": "whitespace", "filter": [ "reverse" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa6282d4bc92c753c4bd7a5b166abece.asciidoc000066400000000000000000000005011506101734100267610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/start-trained-model-deployment.asciidoc:166 [source, python] ---- resp = client.ml.start_trained_model_deployment( model_id="elastic__distilbert-base-uncased-finetuned-conll03-english", wait_for="started", timeout="1m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa676d54a59dee87ecd28bcc1edce59b.asciidoc000066400000000000000000000010641506101734100271550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-alibabacloud-ai-search.asciidoc:192 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="alibabacloud_ai_search_rerank", inference_config={ "service": "alibabacloud-ai-search", "service_settings": { "api_key": "", "service_id": "ops-bge-reranker-larger", "host": "default-j01.platform-cn-shanghai.opensearch.aliyuncs.com", "workspace": "default" } }, ) print(resp) ---- 
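Once a rerank endpoint such as `alibabacloud_ai_search_rerank` is created, it can be called directly through the inference API; a minimal sketch (the input documents here are placeholders):

[source, python]
----
resp = client.inference.inference(
    task_type="rerank",
    inference_id="alibabacloud_ai_search_rerank",
    query="What is Elasticsearch?",
    input=[
        "Elasticsearch is a distributed search and analytics engine",
        "Kibana is a data visualization tool"
    ],
)
# Reranked documents are returned in relevance order with scores
print(resp)
----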
python-elasticsearch-9.1.1/docs/examples/aa699ff3234f54d091575a38e859a627.asciidoc000066400000000000000000000005361506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations.asciidoc:287 [source, python] ---- resp = client.search( index="my-index-000001", typed_keys=True, aggs={ "my-agg-name": { "histogram": { "field": "my-field", "interval": 1000 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa7cf5df36b867aee5e3314ac4b4fa68.asciidoc000066400000000000000000000010511506101734100267770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-put.asciidoc:124 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="daily-snapshots", schedule="0 30 1 * * ?", name="", repository="my_repository", config={ "indices": [ "data-*", "important" ], "ignore_unavailable": False, "include_global_state": False }, retention={ "expire_after": "30d", "min_count": 5, "max_count": 50 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa7f62279b487989440d423c1ed4a1c0.asciidoc000066400000000000000000000005101506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/restore-snapshot-api.asciidoc:94 [source, python] ---- resp = client.indices.get_index_template( name="*", filter_path="index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aa814309ad5f1630886ba75255b444f5.asciidoc000066400000000000000000000002721506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/task-queue-backlog.asciidoc:104 [source, python] ---- resp = client.cluster.pending_tasks() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aaa7a61b07861235fb6e489b946c705c.asciidoc000066400000000000000000000004521506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:487 [source, python] ---- resp = client.index( index="my-index-000001", id="1", version="2", version_type="external", document={ "user": { "id": "elkbee" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aab3de5a8a3fefbe012fc2ed50dfe4d6.asciidoc000066400000000000000000000002771506101734100272670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // searchable-snapshots/apis/node-cache-stats.asciidoc:102 [source, python] ---- resp = client.searchable_snapshots.cache_stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aab810de3314d5e11bd564ea096785b8.asciidoc000066400000000000000000000006421506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:428 [source, python] ---- resp = client.search( index="cooking_blog", query={ "bool": { "filter": [ { "term": { "category.keyword": "Breakfast" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aaba346e0becdf12db13658296e0b8a1.asciidoc000066400000000000000000000004341506101734100266740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/error-handling.asciidoc:42 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.number_of_shards": 2, "index.lifecycle.name": "shrink-index" }, ) print(resp) ---- 
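After creating an index with `index.lifecycle.name` set, as above, the ILM explain API shows which policy, phase, and step the index is in; a sketch against that index:

[source, python]
----
resp = client.ilm.explain_lifecycle(
    index="my-index-000001",
)
# One entry per index, including the current phase and any step errors
print(resp["indices"]["my-index-000001"])
----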
python-elasticsearch-9.1.1/docs/examples/aac5996a8398cc8f7701a063df0b2346.asciidoc000066400000000000000000000010361506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:716 [source, python] ---- resp = client.security.put_role_mapping( name="saml-finance", roles=[ "finance_data" ], enabled=True, rules={ "all": [ { "field": { "realm.name": "saml1" } }, { "field": { "groups": "finance-team" } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aad7d80990a6a3c391ff555ce09ae9dc.asciidoc000066400000000000000000000011541506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/numeric.asciidoc:295 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "f": { "type": "scaled_float", "scaling_factor": 0.01 } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "f": 123 }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/aadf36ae37460a735e06b953b4cee494.asciidoc000066400000000000000000000021271506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/frequent-item-sets-aggregation.asciidoc:301 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", runtime_mappings={ "price_range": { "type": "keyword", "script": { "source": "\n def bucket_start = (long) Math.floor(doc['taxful_total_price'].value / 50) * 50;\n def bucket_end = bucket_start + 50;\n emit(bucket_start.toString() + \"-\" + bucket_end.toString());\n " } } }, size=0, aggs={ "my_agg": { "frequent_item_sets": { "minimum_set_size": 4, "fields": [ { "field": "category.keyword" }, { "field": "price_range" }, { "field": "geoip.city_name" } ], "size": 3 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ab0fd1908c9957cc7f63165c156e48cd.asciidoc000066400000000000000000000022161506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/enabled.asciidoc:17 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "user_id": { "type": "keyword" }, "last_updated": { "type": "date" }, "session_data": { "type": "object", "enabled": False } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="session_1", document={ "user_id": "kimchy", "session_data": { "arbitrary_object": { "some_array": [ "foo", "bar", { "baz": 2 } ] } }, "last_updated": "2015-12-06T18:20:22" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="session_2", document={ "user_id": "jpountz", "session_data": "none", "last_updated": "2015-12-06T18:22:13" }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ab1372270c11bcd6f36d1a13e6c69276.asciidoc000066400000000000000000000007341506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:414 [source, python] ---- resp = client.async_search.submit( index="my-index-000001,cluster_one:my-index-000001,cluster_two:my-index-000001", ccs_minimize_roundtrips=True, query={ "match": { "user.id": "kimchy" } }, source=[ "user.id", "message", "http.response.status_code" ], ) print(resp) ---- 
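A submit call like the cross-cluster example above returns an `id` while the search is still running, which can then be polled for the final result. A sketch, assuming `resp` is the submit response:

[source, python]
----
if resp["is_running"]:
    # Retrieve the results, optionally waiting a little longer for completion
    final = client.async_search.get(
        id=resp["id"],
        wait_for_completion_timeout="2s",
    )
    print(final["response"]["hits"]["total"])
----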
python-elasticsearch-9.1.1/docs/examples/ab1a989958c1d345a9dc3dd36ad90c27.asciidoc000066400000000000000000000005621506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:242 [source, python] ---- resp = client.index( index="example", document={ "location": "POLYGON ((1000.0 1000.0, 1001.0 1000.0, 1001.0 1001.0, 1000.0 1001.0, 1000.0 1000.0), (1000.2 1000.2, 1000.8 1000.2, 1000.8 1000.8, 1000.2 1000.8, 1000.2 1000.2))" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ab24bfdfd8c1c7b3044b21a3b4684370.asciidoc000066400000000000000000000011141506101734100265300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/fields.asciidoc:167 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "cost_price": 100 }, ) print(resp) resp1 = client.search( index="my-index-000001", script_fields={ "sales_price": { "script": { "lang": "expression", "source": "doc['cost_price'] * markup", "params": { "markup": 0.2 } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/ab29bfbd35ee482cf54052b03d62cd31.asciidoc000066400000000000000000000013331506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geodistance-aggregation.asciidoc:96 [source, python] ---- resp = client.search( index="museums", size="0", aggs={ "rings": { "geo_distance": { "field": "location", "origin": "POINT (4.894 52.3760)", "unit": "km", "ranges": [ { "to": 100 }, { "from": 100, "to": 300 }, { "from": 300 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ab317aa09c4bd44abbf02517141e37ef.asciidoc000066400000000000000000000013251506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/term-vector.asciidoc:35 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "text": { "type": "text", "term_vector": "with_positions_offsets" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "text": "Quick brown fox" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "match": { "text": "brown fox" } }, highlight={ "fields": { "text": {} } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ab3c36b70459093beafbfd3a7ae75b9b.asciidoc000066400000000000000000000020401506101734100267670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:386 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "date": "2015-10-01T05:30:00Z" }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh=True, document={ "date": "2015-10-01T06:30:00Z" }, ) print(resp1) resp2 = client.search( index="my-index-000001", size="0", aggs={ "my_buckets": { "composite": { "sources": [ { "date": { "date_histogram": { "field": "date", "calendar_interval": "day", "offset": "+6h", "format": "iso8601" } } } ] } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ab8b4537fad80107bc88f633d4039a52.asciidoc000066400000000000000000000003341506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:216 [source, python] ---- resp = client.indices.create( index="logs", aliases={ "": {} }, ) print(resp) ---- 
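Aliases created at index-creation time, as in the `logs` example above, can be verified with the get alias API; a minimal sketch:

[source, python]
----
resp = client.indices.get_alias(
    index="logs",
)
# Maps each matching index to the aliases pointing at it
print(resp)
----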
python-elasticsearch-9.1.1/docs/examples/ab8de34fcfc0277901cb39618ecfc9d5.asciidoc000066400000000000000000000003461506101734100267330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/allocation-explain.asciidoc:108 [source, python] ---- resp = client.cluster.allocation_explain( index="my-index-000001", shard=0, primary=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/abb4a58089574211d434946a923e5725.asciidoc000066400000000000000000000052371506101734100261320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/inference-bucket-aggregation.asciidoc:95 [source, python] ---- resp = client.search( index="kibana_sample_data_logs", size=0, aggs={ "client_ip": { "composite": { "sources": [ { "client_ip": { "terms": { "field": "clientip" } } } ] }, "aggs": { "url_dc": { "cardinality": { "field": "url.keyword" } }, "bytes_sum": { "sum": { "field": "bytes" } }, "geo_src_dc": { "cardinality": { "field": "geo.src" } }, "geo_dest_dc": { "cardinality": { "field": "geo.dest" } }, "responses_total": { "value_count": { "field": "timestamp" } }, "success": { "filter": { "term": { "response": "200" } } }, "error404": { "filter": { "term": { "response": "404" } } }, "error503": { "filter": { "term": { "response": "503" } } }, "malicious_client_ip": { "inference": { "model_id": "malicious_clients_model", "buckets_path": { "response_count": "responses_total", "url_dc": "url_dc", "bytes_sum": "bytes_sum", "geo_src_dc": "geo_src_dc", "geo_dest_dc": "geo_dest_dc", "success": "success._count", "error404": "error404._count", "error503": "error503._count" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/abc280775734daa6cf2c28868e155d10.asciidoc000066400000000000000000000012221506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/weighted-avg-aggregation.asciidoc:101 [source, python] ---- resp = client.index( index="exams", refresh=True, document={ "grade": [ 1, 2, 3 ], "weight": 2 }, ) print(resp) resp1 = client.search( index="exams", size=0, aggs={ "weighted_grade": { "weighted_avg": { "value": { "field": "grade" }, "weight": { "field": "weight" } } } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/abc496de5fd013099a134db369b34a8b.asciidoc000066400000000000000000000010071506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/sum-aggregation.asciidoc:109 [source, python] ---- resp = client.search( index="sales", size="0", query={ "constant_score": { "filter": { "match": { "type": "hat" } } } }, aggs={ "hat_prices": { "sum": { "field": "price", "missing": 100 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/abc7a670a47516b58b6b07d7497b140c.asciidoc000066400000000000000000000023651506101734100264200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/search-speed.asciidoc:272 [source, python] ---- resp = client.search( index="index", query={ "constant_score": { "filter": { "bool": { "should": [ { "range": { "my_date": { "gte": "now-1h", "lte": "now-1h/m" } } }, { "range": { "my_date": { "gt": "now-1h/m", "lt": "now/m" } } }, { "range": { "my_date": { "gte": "now/m", "lte": "now" } } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/abd4fc3ce7784413a56fe2dcfe2809b5.asciidoc000066400000000000000000000004121506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
docs/update-by-query.asciidoc:754 [source, python] ---- resp = client.search( index="test", filter_path="hits.total", query={ "match": { "flag": "foo" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/abdbc81e799e28c833556b1c29f03ba6.asciidoc000066400000000000000000000002411506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-users.asciidoc:118 [source, python] ---- resp = client.security.get_user() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac22cc2b0f4ad659055feed2852a2d59.asciidoc000066400000000000000000000024741506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:1485 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "text_similarity_reranker": { "retriever": { "text_similarity_reranker": { "retriever": { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } }, "rank_window_size": 100, "field": "text", "inference_id": "my-rerank-model", "inference_text": "What are the state of the art applications of AI in information retrieval?" } }, "rank_window_size": 10, "field": "text", "inference_id": "my-other-more-expensive-rerank-model", "inference_text": "Applications of Large Language Models in technology and their impact on user satisfaction" } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac366b9dda7040e743dee85335354094.asciidoc000066400000000000000000000005701506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/shingle-tokenfilter.asciidoc:116 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ { "type": "shingle", "min_shingle_size": 2, "max_shingle_size": 3 } ], text="quick brown fox jumps", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac483996d479946d57c374c3a86b2621.asciidoc000066400000000000000000000005151506101734100262300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/search-as-you-type.asciidoc:18 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_field": { "type": "search_as_you_type" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac497917ef707538198a8458ae3d5c6b.asciidoc000066400000000000000000000003511506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/match-query.asciidoc:165 [source, python] ---- resp = client.search( query={ "match": { "message": "this is a test" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac5b91aa75696f9880451c9439fd9eec.asciidoc000066400000000000000000000014431506101734100265350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:461 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "my_range": { "type": "date_range" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "my_range": [ { "gte": 1504224000000, "lte": 1504569600000 }, { "gte": "2017-09-01", "lte": "2017-09-10" } ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/ac73895ca1882cd1ac65b1facfbb5c63.asciidoc000066400000000000000000000004051506101734100267670ustar00rootroot00000000000000// This file is autogenerated, 
DO NOT EDIT // docs/delete-by-query.asciidoc:10 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", query={ "match": { "user.id": "elkbee" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac8328bc51fd396b3ce5f7ef3e1e73df.asciidoc000066400000000000000000000002541506101734100270130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:64 [source, python] ---- resp = client.snapshot.get_repository() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac85e05c0bf2fd5099fbcb9c492f447e.asciidoc000066400000000000000000000004041506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/update-settings.asciidoc:73 [source, python] ---- resp = client.cluster.put_settings( flat_settings=True, transient={ "indices.recovery.max_bytes_per_sec": "20mb" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ac9fe9b64891095bcf84066f719b3dc4.asciidoc000066400000000000000000000005751506101734100265340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-source-only.asciidoc:41 [source, python] ---- resp = client.snapshot.create_repository( name="my_src_only_repository", repository={ "type": "source", "settings": { "delegate_type": "fs", "location": "my_backup_repository" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/acb10091ad335ddd15d71021aaf23c62.asciidoc000066400000000000000000000007301506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:631 [source, python] ---- resp = client.search( track_scores=True, sort=[ { "post_date": { "order": "desc" } }, { "name": "desc" }, { "age": "desc" } ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/acb850c08f51226eadb75be09e336076.asciidoc000066400000000000000000000003531506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/async-search.asciidoc:259 [source, python] ---- resp = client.async_search.status( id="FmRldE8zREVEUzA2ZVpUeGs2ejJFUFEaMkZ5QTVrSTZSaVN3WlNFVmtlWHJsdzoxMDc=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/acc44366a9908684b2c8c2b119a4fb2b.asciidoc000066400000000000000000000013051506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-using-query-rules.asciidoc:202 [source, python] ---- resp = client.search( index="my-index-000001", retriever={ "rule": { "retriever": { "standard": { "query": { "query_string": { "query": "puggles" } } } }, "match_criteria": { "query_string": "puggles", "user_country": "us" }, "ruleset_ids": [ "my-ruleset" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/acc52da725a996ae696b00d9f818dfde.asciidoc000066400000000000000000000007231506101734100267360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:328 [source, python] ---- resp = client.indices.analyze( index="file-path-test", analyzer="custom_path_tree", text="/User/alice/photos/2017/05/16/my_photo1.jpg", ) print(resp) resp1 = client.indices.analyze( index="file-path-test", analyzer="custom_path_tree_reversed", text="/User/alice/photos/2017/05/16/my_photo1.jpg", ) print(resp1) ---- 
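The analyze API returns a `tokens` array, so the output of the two path analyzers above can be compared directly; a small sketch:

[source, python]
----
resp = client.indices.analyze(
    index="file-path-test",
    analyzer="custom_path_tree",
    text="/User/alice/photos/2017/05/16/my_photo1.jpg",
)
# A forward path-hierarchy tokenizer emits successive prefixes:
# /User, /User/alice, /User/alice/photos, ...
print([token["token"] for token in resp["tokens"]])
----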
python-elasticsearch-9.1.1/docs/examples/acc6cd860032167e34fa5e0c043ab3b0.asciidoc000066400000000000000000000004071506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:335 [source, python] ---- resp = client.search( query={ "query_string": { "query": "city.\\*:(this AND that OR thus)" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad0dcbc7fc619e952c8825b8f307b7b2.asciidoc000066400000000000000000000006361506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:410 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "Jon", "type": "cross_fields", "fields": [ "first", "first.edge", "last", "last.edge" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad2416ca0581316cee6c63129685bca5.asciidoc000066400000000000000000000005741506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:498 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "title", "content" ], "query": "this OR that OR thus", "minimum_should_match": 2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad2b8aed84c67cdc295917b47a12d3dc.asciidoc000066400000000000000000000020311506101734100267130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/knn-query.asciidoc:43 [source, python] ---- resp = client.bulk( index="my-image-index", refresh=True, operations=[ { "index": { "_id": "1" } }, { "image-vector": [ 1, 5, -20 ], "file-type": "jpg", "title": "mountain lake" }, { "index": { "_id": "2" } }, { "image-vector": [ 42, 8, -15 ], "file-type": "png", "title": "frozen lake" }, { "index": { "_id": "3" } }, { "image-vector": [ 15, 11, 23 ], "file-type": "jpg", "title": "mountain lake lodge" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad3b159657d4bcb373623fdc61acc3bf.asciidoc000066400000000000000000000002701506101734100267050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/count.asciidoc:16 [source, python] ---- resp = client.count( index="my-index-000001", q="user:kimchy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad57ccba0a060da4f5313692fa26a235.asciidoc000066400000000000000000000024331506101734100265270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/date_nanos.asciidoc:30 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "date": { "type": "date_nanos" } } }, ) print(resp) resp1 = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": { "_id": "1" } }, { "date": "2015-01-01" }, { "index": { "_id": "2" } }, { "date": "2015-01-01T12:10:30.123456789Z" }, { "index": { "_id": "3" } }, { "date": 1420070400000 } ], ) print(resp1) resp2 = client.search( index="my-index-000001", sort={ "date": "asc" }, runtime_mappings={ "date_has_nanos": { "type": "boolean", "script": "emit(doc['date'].value.nano != 0)" } }, fields=[ { "field": "date", "format": "strict_date_optional_time_nanos" }, { "field": "date_has_nanos" } ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ad63eca6829a25293c9be589c1870547.asciidoc000066400000000000000000000014301506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:298 [source, python] 
---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_moving_sum": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.sum(values)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad6d81be5fad4bad87486b699454dce5.asciidoc000066400000000000000000000010041506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/t-test-aggregation.asciidoc:32 [source, python] ---- resp = client.search( index="node_upgrade", size=0, aggs={ "startup_time_ttest": { "t_test": { "a": { "field": "startup_time_before" }, "b": { "field": "startup_time_after" }, "type": "paired" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad88e46bb06739991498dee248850223.asciidoc000066400000000000000000000002231506101734100262240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/thread_pool.asciidoc:142 [source, python] ---- resp = client.cat.thread_pool() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad92a1a8bb1b0f26d1536fe8ba4ffd17.asciidoc000066400000000000000000000004531506101734100267650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/render-search-template-api.asciidoc:39 [source, python] ---- resp = client.render_search_template( id="my-search-template", params={ "query_string": "hello world", "from": 20, "size": 10 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ad9889fd8a4b5930e312a51f3bc996dc.asciidoc000066400000000000000000000011151506101734100265740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elasticsearch.asciidoc:140 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="my-elser-model", inference_config={ "service": "elasticsearch", "service_settings": { "adaptive_allocations": { "enabled": True, "min_number_of_allocations": 1, "max_number_of_allocations": 4 }, "num_threads": 1, "model_id": ".elser_model_2" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ada2675a9c631da2bfe627fc2618f5ed.asciidoc000066400000000000000000000006541506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/script-score-query.asciidoc:18 [source, python] ---- resp = client.search( query={ "script_score": { "query": { "match": { "message": "elasticsearch" } }, "script": { "source": "doc['my-int'].value / 10 " } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/adc18ca0c344d81d68ec3b9422b54ff5.asciidoc000066400000000000000000000011231506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/multi-search.asciidoc:318 [source, python] ---- resp = client.msearch( index="my-index-000001", searches=[ {}, { "query": { "match_all": {} }, "from": 0, "size": 10 }, {}, { "query": { "match_all": {} } }, { "index": "my-index-000002" }, { "query": { "match_all": {} } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/adced6e22ef03c2ae3b14aa5bdd24fd9.asciidoc000066400000000000000000000003351506101734100271740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/data-stream-reindex-status.asciidoc:130 [source, python] ---- resp = client.indices.get_migrate_reindex_status( index="my-data-stream", ) print(resp) ---- 
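The multi search example above returns one entry per search, in request order, and failures are reported per entry rather than failing the whole call. A sketch, assuming `resp` is the msearch response:

[source, python]
----
for item in resp["responses"]:
    if "error" in item:
        # This particular search failed; the others are unaffected
        print("failed:", item["error"]["type"])
    else:
        print("hits:", item["hits"]["total"]["value"])
----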
python-elasticsearch-9.1.1/docs/examples/add240aa149d8b11139947502b279ee0.asciidoc000066400000000000000000000004161506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:403 [source, python] ---- resp = client.scroll( scroll="1m", scroll_id="DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/add82cbe7cd95c4be5ce1c9958f2f208.asciidoc000066400000000000000000000006251506101734100270200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:335 [source, python] ---- resp = client.search( index="cooking_blog", query={ "multi_match": { "query": "vegetarian curry", "fields": [ "title^3", "description^2", "tags" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/adf36e2d8fc05c3719c91912481c4e19.asciidoc000066400000000000000000000003001506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/enable-users.asciidoc:50 [source, python] ---- resp = client.security.enable_user( username="jacknich", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/adf728b0c11c5c309c730205609a379d.asciidoc000066400000000000000000000006241506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:532 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "set": { "description": "Set dynamic '' field to 'code' value", "field": "{{{service}}}", "value": "{{{code}}}" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ae0d20c2ebb59278e08a26c9634d90c9.asciidoc000066400000000000000000000003511506101734100264760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:290 [source, python] ---- resp = client.snapshot.create( repository="my_repository", snapshot="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ae3473adaf1515afcf7773f26c018e5c.asciidoc000066400000000000000000000005511506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-zoom.asciidoc:60 [source, python] ---- resp = client.connector.put( connector_id="my-{service-name-stub}-connector", index_name="my-elasticsearch-index", name="Content synced from {service-name}", service_type="{service-name-stub}", is_native=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ae398a6b6494e7982ef2549fc2cd2d8e.asciidoc000066400000000000000000000023061506101734100266140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:353 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "full_name": { "path_match": [ "name.*", "user.name.*" ], "path_unmatch": [ "*.middle", "*.midinitial" ], "mapping": { "type": "text", "copy_to": "full_name" } } } ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "name": { "first": "John", "middle": "Winston", "last": "Lennon" } }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "user": { "name": { "first": "Jane", "midinitial": "M", "last": "Salazar" } } }, ) print(resp2) ---- 
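Because the dynamic template above copies matching name fields into `full_name`, the combined field is searchable as ordinary text; a sketch, assuming the two documents above were indexed (middle names are excluded by `path_unmatch`):

[source, python]
----
resp = client.search(
    index="my-index-000001",
    query={
        "match": {
            "full_name": "John Lennon"
        }
    },
)
print(resp["hits"]["hits"])
----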
python-elasticsearch-9.1.1/docs/examples/ae4aa368617637a390074535df86e64b.asciidoc000066400000000000000000000003141506101734100262650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/common/apis/set-upgrade-mode.asciidoc:80 [source, python] ---- resp = client.ml.set_upgrade_mode( enabled=True, timeout="10m", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ae591d49e54b838c15cdcf64a8dee9c2.asciidoc000066400000000000000000000006541506101734100267440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:222 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_docs": 10000000 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ae82eb17c23cb8e5761cb6240a5ed0a6.asciidoc000066400000000000000000000010411506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/put-dfanalytics.asciidoc:793 [source, python] ---- resp = client.ml.put_data_frame_analytics( id="student_performance_mathematics_0.3", source={ "index": "student_performance_mathematics" }, dest={ "index": "student_performance_mathematics_reg" }, analysis={ "regression": { "dependent_variable": "G3", "training_percent": 70, "randomize_seed": 19673948271 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ae9ccfaa146731ab9176df90670db1c2.asciidoc000066400000000000000000000014741506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/bulk.asciidoc:509 [source, python] ---- resp = client.bulk( operations=[ { "index": { "_index": "test", "_id": "1" } }, { "field1": "value1" }, { "delete": { "_index": "test", "_id": "2" } }, { "create": { "_index": "test", "_id": "3" } }, { "field1": "value3" }, { "update": { "_id": "1", "_index": "test" } }, { "doc": { "field2": "value2" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aeaa97939a05f5b2f3f2c43b771f35e3.asciidoc000066400000000000000000000005431506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:316 [source, python] ---- resp = client.termvectors( index="my-index-000001", id="1", fields=[ "text", "some_field_without_term_vectors" ], offsets=True, positions=True, term_statistics=True, field_statistics=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aebf9cc593fcf0d4ca08f8b61b67bf17.asciidoc000066400000000000000000000006201506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-azure.asciidoc:206 [source, python] ---- resp = client.snapshot.create_repository( name="my_backup", repository={ "type": "azure", "settings": { "client": "secondary", "container": "my_container", "base_path": "snapshots_prefix" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aee26dd62fbb6d614a0798f3344c0598.asciidoc000066400000000000000000000020031506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/reverse-nested-aggregation.asciidoc:57 [source, python] ---- resp = client.search( index="issues", query={ "match_all": {} }, aggs={ "comments": { "nested": { "path": "comments" }, "aggs": { "top_usernames": { "terms": { "field": "comments.username" }, "aggs": { "comment_to_issue": { "reverse_nested": {}, "aggs": { "top_tags_per_comment": { "terms": { "field": "tags" } } } } } 
} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/aee4734ee63dbbbd12a21ee886f7a829.asciidoc000066400000000000000000000007501506101734100267260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:548 [source, python] ---- resp = client.search( sort=[ { "_geo_distance": { "pin.location": [ -70, 40 ], "order": "asc", "unit": "km" } } ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af00a58d9171d32f6efe52d94e51e526.asciidoc000066400000000000000000000023201506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:992 [source, python] ---- resp = client.indices.create( index="hindi_example", settings={ "analysis": { "filter": { "hindi_stop": { "type": "stop", "stopwords": "_hindi_" }, "hindi_keywords": { "type": "keyword_marker", "keywords": [ "उदाहरण" ] }, "hindi_stemmer": { "type": "stemmer", "language": "hindi" } }, "analyzer": { "rebuilt_hindi": { "tokenizer": "standard", "filter": [ "lowercase", "decimal_digit", "hindi_keywords", "indic_normalization", "hindi_normalization", "hindi_stop", "hindi_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af18f5c5fb2364ae23c6a14431820aba.asciidoc000066400000000000000000000003001506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/enrich/get-enrich-policy.asciidoc:94 [source, python] ---- resp = client.enrich.get_policy( name="my-policy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af44cc7fb0c435d4497c77baf904bf5e.asciidoc000066400000000000000000000005771506101734100267370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:103 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "date_histogram": { "field": "date", "calendar_interval": "month" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af517b6936fa41d124d68b107b2efdc3.asciidoc000066400000000000000000000002701506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/delete-lifecycle.asciidoc:82 [source, python] ---- resp = client.ilm.delete_lifecycle( name="my_policy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af607715d0693587dd12962266359a96.asciidoc000066400000000000000000000005531506101734100260670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-s3.asciidoc:232 [source, python] ---- resp = client.snapshot.create_repository( name="my_s3_repository", repository={ "type": "s3", "settings": { "bucket": "my-bucket", "another_setting": "setting-value" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af746266a49a693ff6170c88da8a8c04.asciidoc000066400000000000000000000014721506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stop-tokenfilter.asciidoc:210 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "default": { "tokenizer": "whitespace", "filter": [ "my_custom_stop_words_filter" ] } }, "filter": { "my_custom_stop_words_filter": { "type": "stop", "ignore_case": True, "stopwords": [ "and", "is", "the" ] } } } }, ) print(resp) ---- 
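Since the stop filter above is wired into the index's `default` analyzer, the analyze API can confirm that `and`, `is`, and `the` are dropped at index time; a small sketch:

[source, python]
----
resp = client.indices.analyze(
    index="my-index-000001",
    analyzer="default",
    text="The quick fox is brown and fast",
)
# Expect only: quick, fox, brown, fast (stop words matched case-insensitively)
print([token["token"] for token in resp["tokens"]])
----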
python-elasticsearch-9.1.1/docs/examples/af7c5add165b005aefb552d79130fed6.asciidoc000066400000000000000000000004561506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:232 [source, python] ---- resp = client.search( index="my_locations", query={ "geo_grid": { "location": { "geotile": "6/32/22" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af84b3995564a7ca84360a526a4ac896.asciidoc000066400000000000000000000012241506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/truncate-tokenfilter.asciidoc:128 [source, python] ---- resp = client.indices.create( index="5_char_words_example", settings={ "analysis": { "analyzer": { "lowercase_5_char": { "tokenizer": "lowercase", "filter": [ "5_char_trunc" ] } }, "filter": { "5_char_trunc": { "type": "truncate", "length": 5 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af85ad2551d1cc6742c6521d71c889cc.asciidoc000066400000000000000000000005371506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/specify-analyzer.asciidoc:50 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "title": { "type": "text", "analyzer": "whitespace" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af91019991bee136df5460e2fd4ac72a.asciidoc000066400000000000000000000003071506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:243 [source, python] ---- resp = client.indices.rollover( alias="my-data-stream", lazy=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/af970eb8b93cdea52209e1256eba9d8c.asciidoc000066400000000000000000000002671506101734100267310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shard-stores.asciidoc:130 [source, python] ---- resp = client.indices.shard_stores( index="test1,test2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afa11ebb493ebbfd77acbbe50d2ce6db.asciidoc000066400000000000000000000023241506101734100273370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-manual.asciidoc:591 [source, python] ---- resp = client.search( index="my-data-stream", size=0, aggs={ "tsid": { "terms": { "field": "_tsid" }, "aggs": { "over_time": { "date_histogram": { "field": "@timestamp", "fixed_interval": "1d" }, "aggs": { "min": { "min": { "field": "kubernetes.container.memory.usage.bytes" } }, "max": { "max": { "field": "kubernetes.container.memory.usage.bytes" } }, "avg": { "avg": { "field": "kubernetes.container.memory.usage.bytes" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afa24b7d72c2d9f586023a49bd655ec7.asciidoc000066400000000000000000000023241506101734100265660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/use-elasticsearch-for-time-series-data.asciidoc:158 [source, python] ---- resp = client.async_search.submit( index="my-data-stream", runtime_mappings={ "source.ip": { "type": "ip", "script": "\n String sourceip=grok('%{IPORHOST:sourceip} .*').extract(doc[ \"message\" ].value)?.sourceip;\n if (sourceip != null) emit(sourceip);\n " } }, query={ "bool": { "filter": [ { "range": { "@timestamp": { "gte": "now-2y/d", "lt": "now/d" } } }, { "range": { "source.ip": { "gte": "192.0.2.0", "lte": "192.0.2.255" } } } ] } }, fields=[ "*" ], 
source=False, sort=[ { "@timestamp": "desc" }, { "source.ip": "desc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afadb6bb7d0fa5a4531708af1ea8f9f8.asciidoc000066400000000000000000000004231506101734100270540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-with-existing-indices.asciidoc:159 [source, python] ---- resp = client.reindex( source={ "index": "mylogs-*" }, dest={ "index": "mylogs", "op_type": "create" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afbea723c4ba0d50c67d04ebb73a4101.asciidoc000066400000000000000000000003221506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search-application/apis/delete-search-application.asciidoc:75 [source, python] ---- resp = client.search_application.delete( name="my-app", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afc0a9cffc0100797a3f093094394763.asciidoc000066400000000000000000000016041506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-invalidate-api.asciidoc:88 [source, python] ---- resp = client.security.saml_invalidate( query_string="SAMLRequest=nZFda4MwFIb%2FiuS%2BmviRpqFaClKQdbvo2g12M2KMraCJ9cRR9utnW4Wyi13sMie873MeznJ1aWrnS3VQGR0j4mLkKC1NUeljjA77zYyhVbIE0dR%2By7fmaHq7U%2BdegXWGpAZ%2B%2F4pR32luBFTAtWgUcCv56%2Fp5y30X87Yz1khTIycdgpUW9kY7WdsC9zxoXTvMvWuVV98YyMnSGH2SYE5pwALBIr9QKiwDGpW0oGVUznGeMyJZKFkQ4jBf5HnhUymjIhzCAL3KNFihbYx8TBYzzGaY7EnIyZwHzCWMfiDnbRIftkSjJr%2BFu0e9v%2B0EgOquRiiZjKpiVFp6j50T4WXoyNJ%2FEWC9fdqc1t%2F1%2B2F3aUpjzhPiXpqMz1%2FHSn4A&SigAlg=http%3A%2F%2Fwww.w3.org%2F2001%2F04%2Fxmldsig-more%23rsa-sha256&Signature=MsAYz2NFdovMG2mXf6TSpu5vlQQyEJAg%2B4KCwBqJTmrb3yGXKUtIgvjqf88eCAK32v3eN8vupjPC8LglYmke1ZnjK0%2FKxzkvSjTVA7mMQe2AQdKbkyC038zzRq%2FYHcjFDE%2Bz0qISwSHZY2NyLePmwU7SexEXnIz37jKC6NMEhus%3D", realm="saml1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afcacd742d18bf220e02f0bc6891526d.asciidoc000066400000000000000000000006411506101734100266220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/autodatehistogram-aggregation.asciidoc:270 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sale_date": { "auto_date_histogram": { "field": "date", "buckets": 10, "minimum_interval": "minute" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afd90d268187f995dc002abc189f818d.asciidoc000066400000000000000000000012331506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:345 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "format": "yyyy-MM-dd" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afdb19ad1ebb4f64e235528b640817b6.asciidoc000066400000000000000000000005731506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:793 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "drop": { "description": "Drop documents with 'network.name' of 'Guest'", "if": "ctx?.network?.name == 'Guest'" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afe30f159937b38d74c869570cfcd369.asciidoc000066400000000000000000000005431506101734100264530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
snapshot-restore/apis/restore-snapshot-api.asciidoc:274 [source, python] ---- resp = client.indices.close( index="index_1", ) print(resp) resp1 = client.snapshot.restore( repository="my_repository", snapshot="snapshot_2", wait_for_completion=True, indices="index_1", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/afe5aeb9317f0ae470b28e85a8d98274.asciidoc000066400000000000000000000013711506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/null-value.asciidoc:12 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "status_code": { "type": "keyword", "null_value": "NULL" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "status_code": None }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "status_code": [] }, ) print(resp2) resp3 = client.search( index="my-index-000001", query={ "term": { "status_code": "NULL" } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/afe87a2850326e0328fbebbefec2e839.asciidoc000066400000000000000000000003131506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-shards.asciidoc:177 [source, python] ---- resp = client.search_shards( index="my-index-000001", routing="foo,bar", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/afef5cac988592b97ae289ab39c2f437.asciidoc000066400000000000000000000007021506101734100266700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/text.asciidoc:307 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_field": { "type": "text", "fields": { "keyword": { "type": "keyword" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/affc7ff234dc3acccb2bf7dc51f54813.asciidoc000066400000000000000000000004331506101734100271310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/charfilters/htmlstrip-charfilter.asciidoc:21 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", char_filter=[ "html_strip" ], text="<p>I&apos;m so <b>happy</b>!</p>", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b00ac39faf96785e89be8d4205fb984d.asciidoc000066400000000000000000000011751506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:572 [source, python] ---- resp = client.search_application.search( name="my_search_application", params={ "text": True, "size": 5, "query_string": "mountain climbing", "text_fields": [ { "name": "title", "boost": 10 }, { "name": "description", "boost": 5 }, { "name": "state", "boost": 1 } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b00d74eed431a272c829c0f798e3a539.asciidoc000066400000000000000000000031371506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:89 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "d": { "type": "date" } } }, ) print(resp) resp1 = client.bulk( index="test", refresh=True, operations=[ { "index": {} }, { "s": 1, "m": 3.1415, "i": 1, "d": "2020-01-01T00:12:12Z", "t": "cat" }, { "index": {} }, { "s": 2, "m": 1, "i": 6, "d": "2020-01-02T00:12:12Z", "t": "dog" }, { "index": {} }, { "s": 3, "m": 2.71828, "i": -12, "d": "2019-12-31T00:12:12Z", "t": "chicken" } ], ) print(resp1) resp2 = client.search( index="test", filter_path="aggregations", aggs={ "tm": { "top_metrics": { "metrics": [ { "field": "m" }, { "field": "i" }, { "field": "d" }, { "field": "t.keyword" } ], "sort": { "s": "desc" } } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b00f3bc0e47905aaa2124d6a025c75d4.asciidoc000066400000000000000000000003461506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:21 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT * FROM library ORDER BY page_count DESC LIMIT 5", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b02e4907c9936c1adc16ccce9d49900d.asciidoc000066400000000000000000000002211506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/health.asciidoc:165 [source, python] ---- resp = client.cluster.health() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b09f155602f9b2a6c40fe7c4a5436b7a.asciidoc000066400000000000000000000014201506101734100264700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:151 [source, python] ---- resp = client.search( runtime_mappings={ "day_of_week": { "type": "keyword", "script": "\n emit(doc['timestamp'].value.dayOfWeekEnum\n .getDisplayName(TextStyle.FULL, Locale.ENGLISH))\n " } }, size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "dow": { "terms": { "field": "day_of_week" } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b0b1ae9582599f501f3b3ed8a42ea2af.asciidoc000066400000000000000000000005271506101734100266350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/circle.asciidoc:66 [source, python] ---- resp = client.index( index="circles", id="1", pipeline="polygonize_circles", document={ "circle": "CIRCLE (30 10 40)" }, ) print(resp) resp1 = client.get( index="circles", id="1", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b0bddf2ffaa83049b195829c06b875cd.asciidoc000066400000000000000000000003451506101734100266440ustar00rootroot00000000000000// This file is
autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:187 [source, python] ---- resp = client.search_application.render_query( name="my_search_application", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b0ce54ff4fec0b0c712506eb81e633f4.asciidoc000066400000000000000000000011571506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/date-index-name.asciidoc:78 [source, python] ---- resp = client.ingest.simulate( pipeline={ "description": "monthly date-time index naming", "processors": [ { "date_index_name": { "field": "date1", "index_name_prefix": "my-index-", "date_rounding": "M" } } ] }, docs=[ { "_source": { "date1": "2016-04-25T12:02:01.789Z" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b0d3f839237fabf8cdc2221734c668ad.asciidoc000066400000000000000000000015351506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/distance-feature-query.asciidoc:62 [source, python] ---- resp = client.index( index="items", id="1", refresh=True, document={ "name": "chocolate", "production_date": "2018-02-01", "location": [ -71.34, 41.12 ] }, ) print(resp) resp1 = client.index( index="items", id="2", refresh=True, document={ "name": "chocolate", "production_date": "2018-01-01", "location": [ -71.3, 41.15 ] }, ) print(resp1) resp2 = client.index( index="items", id="3", refresh=True, document={ "name": "chocolate", "production_date": "2017-12-01", "location": [ -71.3, 41.12 ] }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b0eaf67e5cce24ef8889bf20951ccec1.asciidoc000066400000000000000000000010411506101734100270060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:131 [source, python] ---- resp = client.search( query={ "dis_max": { "queries": [ { "match": { "subject": "brown fox" } }, { "match": { "message": "brown fox" } } ], "tie_breaker": 0.3 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b0ee6f19875fe5bad8aab02d60e3532c.asciidoc000066400000000000000000000010671506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geoip.asciidoc:85 [source, python] ---- resp = client.ingest.put_pipeline( id="geoip", description="Add ip geolocation info", processors=[ { "geoip": { "field": "ip" } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="geoip", document={ "ip": "89.160.20.128" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b0fa301cd3c6b9db128e34114f0c1e8f.asciidoc000066400000000000000000000003761506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:111 [source, python] ---- resp = client.index( index="test", id="1", document={ "counter": 1, "tags": [ "red" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b0fe9a7c8e519995258786be4bef36c4.asciidoc000066400000000000000000000003101506101734100265300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:170 [source, python] ---- resp = client.tasks.cancel( task_id="<task_id>", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b109d0141ec8a0aed5d3805abc349a20.asciidoc000066400000000000000000000014421506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT
EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:438 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movavg": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.linearWeightedAvg(values)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b11a0675e49df0709be693297ca73a2c.asciidoc000066400000000000000000000002561506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/info.asciidoc:199 [source, python] ---- resp = client.xpack.info( categories="build,features", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b14122481ae1f158f1a9a1bfbc4a41b1.asciidoc000066400000000000000000000003301506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/secure-settings.asciidoc:39 [source, python] ---- resp = client.nodes.reload_secure_settings( secure_settings_password="keystore-password", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b16700002af3aa70639f3e88c733bf35.asciidoc000066400000000000000000000003711506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/point-in-time-api.asciidoc:101 [source, python] ---- resp = client.open_point_in_time( index="my-index-000001", keep_alive="1m", allow_partial_search_results=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b17143780e9904bfc1e1c53436497fa1.asciidoc000066400000000000000000000004301506101734100262550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:574 [source, python] ---- resp = client.sql.query( format="json", wait_for_completion_timeout="2s", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b176e0d428726705298184ef39ad5cb2.asciidoc000066400000000000000000000006401506101734100262700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:153 [source, python] ---- resp = client.security.put_role_mapping( name="mapping2", roles=[ "user", "admin" ], enabled=True, rules={ "field": { "username": [ "esadmin01", "esadmin02" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b195068563b1dc0f721f5f8c8d172312.asciidoc000066400000000000000000000003711506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:299 [source, python] ---- resp = client.index( index="example", document={ "location": "MULTIPOINT (1002.0 2000.0, 1003.0 2000.0)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b1e81b70b874a1f0cf75a0ec6e430ddc.asciidoc000066400000000000000000000003671506101734100267060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-async-query-stop-api.asciidoc:25 [source, python] ---- resp = client.esql.async_query_stop( id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b1ee1b0b5f7af596e5f81743cfd3755f.asciidoc000066400000000000000000000003441506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:375 [source, python] ---- resp = client.search( index="<my-index-{now/d}-*>,<my-index-{now/d-1d}-*>,<my-index-{now/d-2d}-*>", ) print(resp) ----
python-elasticsearch-9.1.1/docs/examples/b1efa1c51a34dd5ab5511b71a399f5b1.asciidoc000066400000000000000000000004151506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:456 [source, python] ---- resp = client.reindex( source={ "index": "source" }, dest={ "index": "dest", "pipeline": "some_ingest_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b1f7cb4157b13368373383abd7d2b8cb.asciidoc000066400000000000000000000010141506101734100264650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/remote-clusters-connect.asciidoc:168 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "cluster_two": { "transport.compress": False }, "cluster_three": { "transport.compress": True, "transport.ping_schedule": "60s" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b22559a7c319f90bc63a41cac1c39b4c.asciidoc000066400000000000000000000003661506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-api-keys.asciidoc:162 [source, python] ---- resp = client.security.invalidate_api_key( ids=[ "VuaCfGcBCdbkQm-e5aOx" ], owner=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b23ed357dce8ec0014708b7b2850a8fb.asciidoc000066400000000000000000000002231506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/tasks.asciidoc:84 [source, python] ---- resp = client.cat.tasks( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b2440b492149b705ef107137fdccb0c2.asciidoc000066400000000000000000000002701506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/get-follow-info.asciidoc:34 [source, python] ---- resp = client.ccr.follow_info( index="<index>", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b24a374c0ad264abbcacb5686f5ed61c.asciidoc000066400000000000000000000004211506101734100267530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc:246 [source, python] ---- resp = client.termvectors( index="text_payloads", id="1", fields=[ "text" ], payloads=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b25256ed615cd837461b0bfa590526b7.asciidoc000066400000000000000000000003431506101734100263340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/pause-auto-follow-pattern.asciidoc:85 [source, python] ---- resp = client.ccr.pause_auto_follow_pattern( name="my_auto_follow_pattern", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b2652b1763a5fd31e95c983869b433bd.asciidoc000066400000000000000000000023051506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/avg-aggregation.asciidoc:118 [source, python] ---- resp = client.index( index="metrics_index", id="1", document={ "network.name": "net-1", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp) resp1 = client.index( index="metrics_index", id="2", document={ "network.name": "net-2", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 8, 17, 8, 7, 6 ] } }, ) print(resp1) resp2 = client.search( index="metrics_index", size="0", aggs={ "avg_latency": { "avg": { "field": "latency_histo" } } }, ) print(resp2) ----
python-elasticsearch-9.1.1/docs/examples/b2b26f8568c5dba7649e79f09b859272.asciidoc000066400000000000000000000004351506101734100263770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/saml-guide.asciidoc:944 [source, python] ---- resp = client.security.put_user( username="saml-service-user", password="<somePasswordHere>", roles=[ "saml-service-role" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b2dec193082462c775169db438308bc3.asciidoc000066400000000000000000000007231506101734100262610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:46 [source, python] ---- resp = client.security.put_role( name="remote-replication", cluster=[ "read_ccr" ], indices=[ { "names": [ "leader-index-name" ], "privileges": [ "monitor", "read" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b2e1e802fc3c5fbeb4190af7d598c23e.asciidoc000066400000000000000000000005241506101734100267160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/index_.asciidoc:277 [source, python] ---- resp = client.index( index="my-index-000001", document={ "@timestamp": "2099-11-15T13:12:00", "message": "GET /search HTTP/1.1 200 1070000", "user": { "id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b2e20bca1846d7d584626b12eae9f6dc.asciidoc000066400000000000000000000004211506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/increase-other-node-capacity.asciidoc:80 [source, python] ---- resp = client.cat.nodes( v=True, h="name,node.role,disk.used_percent,disk.used,disk.avail,disk.total", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b2e4f3257c0e0aa3311f7270034bbc42.asciidoc000066400000000000000000000005451506101734100263620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-management/migrate-index-allocation-filters.asciidoc:175 [source, python] ---- resp = client.indices.put_settings( index="my-index", settings={ "index.routing.allocation.require.data": None, "index.routing.allocation.include._tier_preference": "data_hot" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b3479ee4586c15020549afae58d94d65.asciidoc000066400000000000000000000013701506101734100263600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-point.asciidoc:225 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "point": { "type": "geo_point" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "point": [ { "lat": -90, "lon": -80 }, { "lat": 10, "lon": 30 } ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b3623b8c7f3e7650f52b6fb8b050f583.asciidoc000066400000000000000000000002501506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:405 [source, python] ---- resp = client.features.get_features() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b3685560cb328f179d96ffe7c2668f72.asciidoc000066400000000000000000000015201506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:611 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": {
"field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movavg": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "if (values.length > 5*2) {MovingFunctions.holtWinters(values, 0.3, 0.1, 0.1, 5, false)}" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b3756e700d0f6c7e8919003bdf26bc8f.asciidoc000066400000000000000000000003461506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-unbalanced-cluster.asciidoc:76 [source, python] ---- resp = client.perform_request( "DELETE", "/_internal/desired_balance", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b37919cc438b47477343833b4e522408.asciidoc000066400000000000000000000010701506101734100260470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:424 [source, python] ---- resp = client.termvectors( index="imdb", doc={ "plot": "When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil." }, term_statistics=True, field_statistics=True, positions=False, offsets=False, filter={ "max_num_terms": 3, "min_term_freq": 1, "min_doc_freq": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b3a1c4220617ded67ed43fff2051d324.asciidoc000066400000000000000000000005061506101734100264600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/eager-global-ordinals.asciidoc:51 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "tags": { "type": "keyword", "eager_global_ordinals": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b3a711c3deddcdb8a3f6623184a8b794.asciidoc000066400000000000000000000005031506101734100266320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:124 [source, python] ---- resp = client.update( index="test", id="1", script={ "source": "ctx._source.counter += params.count", "lang": "painless", "params": { "count": 4 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b3cd07f02059165fd62a2f148be3dc58.asciidoc000066400000000000000000000012171506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/numeric.asciidoc:259 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "long": { "type": "long" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "long": [ 0, 0, -123466, 87612 ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b3ed567d2c0915a280b6b15f7a37539b.asciidoc000066400000000000000000000015051506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/percentiles-bucket-aggregation.asciidoc:43 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "percentiles_monthly_sales": { "percentiles_bucket": { "buckets_path": "sales_per_month>sales", "percents": [ 25, 50, 75 ] } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/b3f442a7d9eb391121dcab991787f9d6.asciidoc000066400000000000000000000012311506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/binary.asciidoc:68 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "binary": { "type": "binary", "doc_values": True } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "binary": [ "IAA=", "EAA=" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b3fffd96fdb118cd059b5f1d67d928de.asciidoc000066400000000000000000000007211506101734100270220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:330 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "MultiPoint", "coordinates": [ [ 102, 2 ], [ 103, 2 ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b42e7d627cd79e4c5e7a4a3cd8b19ce0.asciidoc000066400000000000000000000020621506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:948 [source, python] ---- resp = client.ingest.put_pipeline( id="one-pipeline-to-rule-them-all", processors=[ { "pipeline": { "description": "If 'service.name' is 'apache_httpd', use 'httpd_pipeline'", "if": "ctx.service?.name == 'apache_httpd'", "name": "httpd_pipeline" } }, { "pipeline": { "description": "If 'service.name' is 'syslog', use 'syslog_pipeline'", "if": "ctx.service?.name == 'syslog'", "name": "syslog_pipeline" } }, { "fail": { "description": "If 'service.name' is not 'apache_httpd' or 'syslog', return a failure message", "if": "ctx.service?.name != 'apache_httpd' && ctx.service?.name != 'syslog'", "message": "This pipeline requires service.name to be either `syslog` or `apache_httpd`" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b430122345d560bbd2a77826f5c475f7.asciidoc000066400000000000000000000015701506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:272 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "ip_fields": { "match": [ "ip_*", "*_ip" ], "unmatch": [ "one*", "*two" ], "mapping": { "type": "ip" } } } ] }, ) print(resp) resp1 = client.index( index="my-index", id="1", document={ "one_ip": "will not match", "ip_two": "will not match", "three_ip": "12.12.12.12", "ip_four": "13.13.13.13" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b45a8c6fc746e9c90fd181e69a605fad.asciidoc000066400000000000000000000004071506101734100266570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/post-inference.asciidoc:107 [source, python] ---- resp = client.inference.inference( task_type="completion", inference_id="openai_chat_completions", input="What is Elastic?", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b45c60f908b329835ab40609423f378e.asciidoc000066400000000000000000000003111506101734100262000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/increase-tier-capacity.asciidoc:272 [source, python] ---- resp = client.cat.nodes( h="node.role", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/b4693f2aa9fa65db04ab2499355c54fc.asciidoc000066400000000000000000000010371506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:4 [source, python] ---- resp = client.search( index="cohere-embeddings", knn={ "field": "content_embedding", "query_vector_builder": { "text_embedding": { "model_id": "cohere_embeddings", "model_text": "Muscles in human body" } }, "k": 10, "num_candidates": 100 }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b47945c7db8868dd36ba079b742f2a90.asciidoc000066400000000000000000000004461506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search-application/apis/put-search-application.asciidoc:202 [source, python] ---- resp = client.search_application.search( name="my-app", params={ "default_field": "author", "query_string": "Jane" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b4946ecc9101b97102a1c5bcb19e5607.asciidoc000066400000000000000000000007311506101734100264050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:534 [source, python] ---- resp = client.render_search_template( source="{ \"query\": { \"bool\": { \"filter\": [ {{#year_scope}} { \"range\": { \"@timestamp\": { \"gte\": \"now-1y/d\", \"lt\": \"now/d\" } } }, {{/year_scope}} { \"term\": { \"user.id\": \"{{user_id}}\" }}]}}}", params={ "year_scope": True, "user_id": "kimchy" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b4aec2a1d353852507c091bdb629b765.asciidoc000066400000000000000000000004611506101734100264060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/put-filter.asciidoc:57 [source, python] ---- resp = client.ml.put_filter( filter_id="safe_domains", description="A list of safe domains", items=[ "*.google.com", "wikipedia.org" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b4d1fc887e40885cdf6ac2d01487cb76.asciidoc000066400000000000000000000006461506101734100266000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-multi-term-query.asciidoc:28 [source, python] ---- resp = client.search( query={ "span_multi": { "match": { "prefix": { "user.id": { "value": "ki", "boost": 1.08 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b4d9d5017d42f27281e734e969949623.asciidoc000066400000000000000000000003301506101734100261430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/snapshot/corrupt-repository.asciidoc:140 [source, python] ---- resp = client.snapshot.get_repository( name="my-repo", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b4da132cb934c33d61e2b60988c6d4a3.asciidoc000066400000000000000000000013521506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/serial-diff-aggregation.asciidoc:69 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "timestamp", "calendar_interval": "day" }, "aggs": { "the_sum": { "sum": { "field": "lemmings" } }, "thirtieth_difference": { "serial_diff": { "buckets_path": "the_sum", "lag": 30 } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/b4f3165e873f551fbaa03945877eb370.asciidoc000066400000000000000000000006711506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/field-mapping.asciidoc:126 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_date_formats": [ "yyyy/MM", "MM/dd/yyyy" ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "create_date": "09/25/2015" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b4f4c9ad3301c97fb3c38d108a3bc453.asciidoc000066400000000000000000000016421506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/remote-clusters-connect.asciidoc:125 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "cluster_one": { "seeds": [ "127.0.0.1:{remote-interface-default-port}" ] }, "cluster_two": { "mode": "sniff", "seeds": [ "127.0.0.1:{remote-interface-default-port-plus1}" ], "transport.compress": True, "skip_unavailable": True }, "cluster_three": { "mode": "proxy", "proxy_address": "127.0.0.1:{remote-interface-default-port-plus2}" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b504119238b44cddd3b5944da20a498d.asciidoc000066400000000000000000000004521506101734100264070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:214 [source, python] ---- resp = client.index( index="example", document={ "location": "POLYGON ((1000.0 -1001.0, 1001.0 -1001.0, 1001.0 -1000.0, 1000.0 -1000.0, 1000.0 -1001.0))" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b515427f8685ca7d79176def672d19fa.asciidoc000066400000000000000000000004341506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:618 [source, python] ---- resp = client.indices.refresh() print(resp) resp1 = client.search( index="my-index-000001", size="0", q="extra:test", filter_path="hits.total", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b52951b78cd5fb2f9353d1c7e6d37070.asciidoc000066400000000000000000000005361506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/wildcard-query.asciidoc:21 [source, python] ---- resp = client.search( query={ "wildcard": { "user.id": { "value": "ki*y", "boost": 1, "rewrite": "constant_score_blended" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b557f114e21dbc6f531d4e7621a08e8f.asciidoc000066400000000000000000000016741506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/source-field.asciidoc:80 [source, python] ---- resp = client.indices.create( index="logs", mappings={ "_source": { "includes": [ "*.count", "meta.*" ], "excludes": [ "meta.description", "meta.other.*" ] } }, ) print(resp) resp1 = client.index( index="logs", id="1", document={ "requests": { "count": 10, "foo": "bar" }, "meta": { "name": "Some metric", "description": "Some metric description", "other": { "foo": "one", "baz": "two" } } }, ) print(resp1) resp2 = client.search( index="logs", query={ "match": { "meta.other.foo": "one" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b573e893de0d5f92d67f4f5eb7f0c353.asciidoc000066400000000000000000000012731506101734100266060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
aggregations/pipeline/stats-bucket-aggregation.asciidoc:41 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "stats_monthly_sales": { "stats_bucket": { "buckets_path": "sales_per_month>sales" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b583bf8d3a2f49d633aa2cfed5606418.asciidoc000066400000000000000000000011261506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-component-template.asciidoc:196 [source, python] ---- resp = client.cluster.put_component_template( name="template_1", template={ "settings": { "number_of_shards": 1 }, "aliases": { "alias1": {}, "alias2": { "filter": { "term": { "user.id": "kimchy" } }, "routing": "shard-1" }, "{index}-alias": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b58b17975bbce307b2ccce5051a449e8.asciidoc000066400000000000000000000005321506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:538 [source, python] ---- resp = client.search( index="my-index-000001", size="0", filter_path="hits.total", query={ "range": { "http.response.bytes": { "lt": 2000000 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b590241c4296299b836fbb5a95bdd2dc.asciidoc000066400000000000000000000005241506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:299 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "avg_order_value": { "avg": { "field": "taxful_total_price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b5bc1bb7278f2f95bc54790c78c928e0.asciidoc000066400000000000000000000015661506101734100265260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/get-job.asciidoc:170 [source, python] ---- resp = client.rollup.put_job( id="sensor2", index_pattern="sensor-*", rollup_index="sensor_rollup", cron="*/30 * * * * ?", page_size=1000, groups={ "date_histogram": { "field": "timestamp", "fixed_interval": "1h", "delay": "7d" }, "terms": { "fields": [ "node" ] } }, metrics=[ { "field": "temperature", "metrics": [ "min", "max", "sum" ] }, { "field": "voltage", "metrics": [ "avg" ] } ], ) print(resp) resp1 = client.rollup.get_jobs( id="_all", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b5e5cd4eccc40d7c5f2a1fcb654bd4a4.asciidoc000066400000000000000000000014261506101734100271250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/diversified-sampler-aggregation.asciidoc:33 [source, python] ---- resp = client.search( index="stackoverflow", size="0", query={ "query_string": { "query": "tags:elasticsearch" } }, aggs={ "my_unbiased_sample": { "diversified_sampler": { "shard_size": 200, "field": "author" }, "aggs": { "keywords": { "significant_terms": { "field": "tags", "exclude": [ "elasticsearch" ] } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b601bc78fb69e15a42e0783219ddc38d.asciidoc000066400000000000000000000012651506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/max-bucket-aggregation.asciidoc:42 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", 
"calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "max_monthly_sales": { "max_bucket": { "buckets_path": "sales_per_month>sales" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b607eea422295a3e9acd75f9ed1c8cb7.asciidoc000066400000000000000000000005471506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:372 [source, python] ---- resp = client.search( sort=[ { "price": { "missing": "_last" } } ], query={ "term": { "product": "chocolate" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b61afb7ca29a11243232ffcc8b5a43cf.asciidoc000066400000000000000000000003231506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:171 [source, python] ---- resp = client.indices.get_field_mapping( index="publications", fields="a*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b620ef4400d2f660fe2c67835938442c.asciidoc000066400000000000000000000003461506101734100262640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/delete-autoscaling-policy.asciidoc:68 [source, python] ---- resp = client.autoscaling.delete_autoscaling_policy( name="my_autoscaling_policy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b62eaa20c4e0e48134a6d1d1b3c30b26.asciidoc000066400000000000000000000132001506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // text-structure/apis/find-field-structure.asciidoc:95 [source, python] ---- resp = client.bulk( refresh=True, operations=[ { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:36,256][INFO ][o.a.l.u.VectorUtilPanamaProvider] [laptop] Java vector incubator API enabled; uses preferredBitSize=128" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,038][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-url]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,042][INFO ][o.e.p.PluginsService ] [laptop] loaded module [rest-root]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-core]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-redact]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,043][INFO ][o.e.p.PluginsService ] [laptop] loaded module [ingest-user-agent]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-monitoring]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [repository-s3]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-analytics]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-ent-search]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-autoscaling]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,044][INFO ][o.e.p.PluginsService ] [laptop] 
loaded module [lang-painless]]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [lang-expression]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:41,059][INFO ][o.e.p.PluginsService ] [laptop] loaded module [x-pack-eql]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:43,291][INFO ][o.e.e.NodeEnvironment ] [laptop] heap size [16gb], compressed ordinary object pointers [true]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:46,098][INFO ][o.e.x.s.Security ] [laptop] Security is enabled" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:47,227][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] Profiling is enabled" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:47,259][INFO ][o.e.x.p.ProfilingPlugin ] [laptop] profiling index templates will not be installed or reinstalled" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:47,755][INFO ][o.e.i.r.RecoverySettings ] [laptop] using rate limit [40mb] with [default=40mb, read=0b, write=0b, max=0b]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:47,787][INFO ][o.e.d.DiscoveryModule ] [laptop] using discovery type [multi-node] and seed hosts providers [settings]" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:49,188][INFO ][o.e.n.Node ] [laptop] initialized" }, { "index": { "_index": "test-logs" } }, { "message": "[2024-03-05T10:52:49,199][INFO ][o.e.n.Node ] [laptop] starting ..." } ], ) print(resp) resp1 = client.text_structure.find_field_structure( index="test-logs", field="message", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b638e11d6a8a084290f8934d224abd52.asciidoc000066400000000000000000000004201506101734100263270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/troubleshooting-shards-capacity.asciidoc:450 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.max_shards_per_node.frozen": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b63ce79ce4fa1bb9b99a789f4dcfef4e.asciidoc000066400000000000000000000004021506101734100271660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:272 [source, python] ---- resp = client.indices.put_settings( index="test", settings={ "top_metrics_max_size": 100 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b65dbb51ddd496189c65a9326a53480c.asciidoc000066400000000000000000000005561506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-read-only-url.asciidoc:14 [source, python] ---- resp = client.snapshot.create_repository( name="my_read_only_url_repository", repository={ "type": "url", "settings": { "url": "file:/mount/backups/my_fs_backup_location" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b66be1daf6c220eb66d94e708b2fae39.asciidoc000066400000000000000000000003121506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/state.asciidoc:150 [source, python] ---- resp = client.cluster.state( metric="metadata,routing_table", index="foo,bar", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/b67fa8c560dd10a8e6f226048cd21562.asciidoc000066400000000000000000000010441506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:472 [source, python] ---- resp = client.render_search_template( source="{ \"query\": { \"bool\": { \"must\": {{#toJson}}clauses{{/toJson}} }}}", params={ "clauses": [ { "term": { "user.id": "kimchy" } }, { "term": { "url.domain": "example.com" } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b68ed7037042719945a2452d23e64c78.asciidoc000066400000000000000000000005161506101734100261330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:343 [source, python] ---- resp = client.index( index="my-index-000001", id="3", refresh=True, document={ "query": { "match": { "message": "brown fox" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b691d41f84b5b46e9051b51db22a46af.asciidoc000066400000000000000000000007241506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/rare-terms-aggregation.asciidoc:308 [source, python] ---- resp = client.search( aggs={ "genres": { "rare_terms": { "field": "genre", "include": [ "swing", "rock" ], "exclude": [ "jazz" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6a6aa9ba20e9a019371ae268488833f.asciidoc000066400000000000000000000003371506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // modules/cluster/remote-clusters-migration.asciidoc:97 [source, python] ---- resp = client.cluster.get_settings( filter_path="persistent.cluster.remote", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6a7ffd2003c38f4aa321f067d162be5.asciidoc000066400000000000000000000014341506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:260 [source, python] ---- resp = client.search( index="my-index", query={ "bool": { "should": [ { "sparse_vector": { "field": "content_embedding", "inference_id": "my-elser-endpoint", "query": "How to avoid muscle soreness after running?", "boost": 1 } }, { "query_string": { "query": "toxins", "boost": 4 } } ] } }, min_score=10, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6c872d04eabb39d1947cde6b29d4ae1.asciidoc000066400000000000000000000004711506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:419 [source, python] ---- resp = client.search( aggs={ "tags": { "terms": { "field": "tags", "min_doc_count": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6d278737d27973e498ac61cda9e5126.asciidoc000066400000000000000000000007141506101734100263670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:509 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "daily_orders": { "date_histogram": { "field": "order_date", "calendar_interval": "day", "format": "yyyy-MM-dd", "min_doc_count": 0 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6e29a0e14b611d4aaafb3051220ea56.asciidoc000066400000000000000000000006151506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/specify-analyzer.asciidoc:158 [source, python] ---- resp = 
client.indices.create( index="my-index-000001", mappings={ "properties": { "title": { "type": "text", "analyzer": "whitespace", "search_analyzer": "simple" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6e385760e036e36827f719b540d9c11.asciidoc000066400000000000000000000005641506101734100262160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/profile.asciidoc:1186 [source, python] ---- resp = client.search( index="my-dfs-index", search_type="dfs_query_then_fetch", pretty=True, size="0", profile=True, query={ "term": { "my-keyword": { "value": "a" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b6f690896001f8f9ad5bf24e1304a552.asciidoc000066400000000000000000000007431506101734100263460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:162 [source, python] ---- resp = client.indices.create( index="my-byte-quantized-index", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 4, "index": True, "index_options": { "type": "int4_hnsw" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b717a583b5165e5c6caafc42fdfd9086.asciidoc000066400000000000000000000035261506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cartesian-bounds-aggregation.asciidoc:97 [source, python] ---- resp = client.indices.create( index="places", mappings={ "properties": { "geometry": { "type": "shape" } } }, ) print(resp) resp1 = client.bulk( index="places", refresh=True, operations=[ { "index": { "_id": 1 } }, { "name": "NEMO Science Museum", "geometry": "POINT(491.2350 5237.4081)" }, { "index": { "_id": 2 } }, { "name": "Sportpark De Weeren", "geometry": { "type": "Polygon", "coordinates": [ [ [ 496.5305328369141, 5239.347642069457 ], [ 496.6979026794433, 5239.172175893484 ], [ 496.9425201416015, 5239.238958618537 ], [ 496.7944622039794, 5239.420969150824 ], [ 496.5305328369141, 5239.347642069457 ] ] ] } } ], ) print(resp1) resp2 = client.search( index="places", size="0", aggs={ "viewport": { "cartesian_bounds": { "field": "geometry" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b724f547c5d67e95bbc0a9920e47033c.asciidoc000066400000000000000000000004601506101734100264240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:289 [source, python] ---- resp = client.search( index="file-path-test", query={ "term": { "file_path.tree": "/User/alice/photos/2017/05/16" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b728d6ba226dba719aadcd8b8099cc74.asciidoc000066400000000000000000000003431506101734100267220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:177 [source, python] ---- resp = client.cat.allocation( v=True, h="node,shards,disk.*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b7a4f5b9a93eff44268a1ee38ee1c6d3.asciidoc000066400000000000000000000004331506101734100267320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:199 [source, python] ---- resp = client.reindex( source={ "index": "archive" }, dest={ "index": "my-data-stream", "op_type": "create" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/b7a9f60b3646efe3834ca8381f8aa560.asciidoc000066400000000000000000000003511506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/logging-config.asciidoc:193 [source, python] ---- resp = client.cluster.put_settings( persistent={ "logger.org.elasticsearch.discovery": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b7ad394975863a8f5ee29627c3ab738b.asciidoc000066400000000000000000000005771506101734100264600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/histogram-aggregation.asciidoc:248 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "prices": { "histogram": { "field": "price", "interval": 50, "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b7bb5503e64bd869b2ac1c46c434a079.asciidoc000066400000000000000000000011171506101734100264710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:226 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "histo": { "histogram": { "field": "price", "interval": 5 } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b7c99eb38d4b37e22de1ffcb0e88ae4c.asciidoc000066400000000000000000000004101506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:279 [source, python] ---- resp = client.index( index="my-index-000001", id="2", document={ "message": "A new bonsai tree in the office" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b7df0848b2dc3093f931976db5b8cfff.asciidoc000066400000000000000000000003351506101734100266630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:38 [source, python] ---- resp = client.cluster.health( filter_path="status,*_shards", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b7f8bd33c22f3c93336ab57c2e091f73.asciidoc000066400000000000000000000003431506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-rules/apis/delete-query-rule.asciidoc:78 [source, python] ---- resp = client.query_rules.delete_rule( ruleset_id="my-ruleset", rule_id="my-rule1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b80e1f5b26bae4f3c2f8a604b7caaf17.asciidoc000066400000000000000000000010611506101734100267600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:290 [source, python] ---- resp = client.security.put_role_mapping( name="mapping7", roles=[ "ldap-example-user" ], enabled=True, rules={ "all": [ { "field": { "dn": "*,ou=subtree,dc=example,dc=com" } }, { "field": { "realm.name": "ldap1" } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b81a7b5f5ef19553f9cd49196f31018c.asciidoc000066400000000000000000000007201506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/distance-feature-query.asciidoc:37 [source, python] ---- resp = client.indices.create( index="items", mappings={ "properties": { "name": { "type": "keyword" }, "production_date": { "type": "date" }, "location": { "type": "geo_point" } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/b82b156c7b9d1d78054577a6947a6cdd.asciidoc000066400000000000000000000005131506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geo-grid.asciidoc:91 [source, python] ---- resp = client.index( index="geocells", id="1", pipeline="geotile2shape", document={ "geocell": "4/8/5" }, ) print(resp) resp1 = client.get( index="geocells", id="1", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b839f79a5d58506baed5714f1876ab55.asciidoc000066400000000000000000000003631506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql-search-api.asciidoc:30 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n process where process.name == \"regsvr32.exe\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b8400dbe39215705060500f0e569f452.asciidoc000066400000000000000000000003151506101734100261010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:312 [source, python] ---- resp = client.connector.get( connector_id="my-connector-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b84932030e60a2cd58884b9dc6d3147f.asciidoc000066400000000000000000000003371506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:644 [source, python] ---- resp = client.search_application.search( name="my_search_application", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b85716ba42a57096452665c38995da7d.asciidoc000066400000000000000000000006261506101734100262250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/preview-dfanalytics.asciidoc:75 [source, python] ---- resp = client.ml.preview_data_frame_analytics( config={ "source": { "index": "houses_sold_last_10_yrs" }, "analysis": { "regression": { "dependent_variable": "price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b857abedc64e367def172bd07075e5c7.asciidoc000066400000000000000000000011631506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/fingerprint-analyzer.asciidoc:89 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_fingerprint_analyzer": { "type": "fingerprint", "stopwords": "_english_" } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_fingerprint_analyzer", text="Yes yes, Gödel said this sentence is consistent and.", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/b87438263ccd68624b1d69d8750f9432.asciidoc000066400000000000000000000006451506101734100262300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/doc-values.asciidoc:37 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "status_code": { "type": "long" }, "session_id": { "type": "long", "index": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b87bc8a521995051c7e7395f9c047e1c.asciidoc000066400000000000000000000014021506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/ignore-malformed.asciidoc:16 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "number_one": { "type": 
"integer", "ignore_malformed": True }, "number_two": { "type": "integer" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "text": "Some text value", "number_one": "foo" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "text": "Some text value", "number_two": "foo" }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b88a2d96da1401d548a4540cca223d27.asciidoc000066400000000000000000000013441506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-vector-tile-api.asciidoc:707 [source, python] ---- resp = client.search_mvt( index="museums", field="location", zoom="13", x="4207", y="2692", grid_agg="geotile", grid_precision=2, fields=[ "name", "price" ], query={ "term": { "included": True } }, aggs={ "min_price": { "min": { "field": "price" } }, "max_price": { "max": { "field": "price" } }, "avg_price": { "avg": { "field": "price" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b8c03bbd917d0cf5474a3e46ebdd7aad.asciidoc000066400000000000000000000004471506101734100270540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/cjk-bigram-tokenfilter.asciidoc:22 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "cjk_bigram" ], text="東京都は、日本の首都であり", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b8cc74a92bac837bfd8ba6d5935350ed.asciidoc000066400000000000000000000013551506101734100267310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:317 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "enabled": False }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", refresh=True, document={ "user_id": "kimchy", "session_data": { "object": { "some_field": "some_value" } } }, ) print(resp1) resp2 = client.search( index="my-index-000001", fields=[ "user_id", { "field": "session_data.object.*", "include_unmapped": True } ], source=False, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b8dc3764c4467922474b2cdec74bb86b.asciidoc000066400000000000000000000003141506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:445 [source, python] ---- resp = client.transform.start_transform( transform_id="last-log-from-clientip", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b8e6e320a19936f6edfc242ccb5cde43.asciidoc000066400000000000000000000013461506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/position-increment-gap.asciidoc:15 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "names": [ "John Abraham", "Lincoln Smith" ] }, ) print(resp) resp1 = client.search( index="my-index-000001", query={ "match_phrase": { "names": { "query": "Abraham Lincoln" } } }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "match_phrase": { "names": { "query": "Abraham Lincoln", "slop": 101 } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/b9370fa1aa18fe4bc00cf81ef0c0d45b.asciidoc000066400000000000000000000004741506101734100267560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:318 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "city.*" ], "query": 
"this AND that OR thus" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b94cee0f74f57742b3948f9b784dfdd4.asciidoc000066400000000000000000000007501506101734100266200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:537 [source, python] ---- resp = client.clear_scroll( scroll_id="DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==,DnF1ZXJ5VGhlbkZldGNoBQAAAAAAAAABFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAAAxZrUllkUVlCa1NqNmRMaUhiQlZkMWFBAAAAAAAAAAIWa1JZZFFZQmtTajZkTGlIYkJWZDFhQQAAAAAAAAAFFmtSWWRRWUJrU2o2ZExpSGJCVmQxYUEAAAAAAAAABBZrUllkUVlCa1NqNmRMaUhiQlZkMWFB", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b968853454b4416f7baa3209eb335957.asciidoc000066400000000000000000000010211506101734100262050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cartesian-centroid-aggregation.asciidoc:79 [source, python] ---- resp = client.search( index="museums", size="0", aggs={ "cities": { "terms": { "field": "city.keyword" }, "aggs": { "centroid": { "cartesian_centroid": { "field": "location" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b96f465abb658fe32889c3d183f159a3.asciidoc000066400000000000000000000007621506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/limit-token-count-tokenfilter.asciidoc:96 [source, python] ---- resp = client.indices.create( index="limit_example", settings={ "analysis": { "analyzer": { "standard_one_token_limit": { "tokenizer": "standard", "filter": [ "limit" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b9a8f39ab9b1ed18c6c1db61ac4e6a9e.asciidoc000066400000000000000000000003311506101734100270560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:317 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="_current", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b9ba66209b7fcc111a7bcef0b3e00052.asciidoc000066400000000000000000000004411506101734100266030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/passthrough.asciidoc:77 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "attributes": { "id": "foo" }, "id": "bar" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/b9f716219359a6c973dafc50b348de33.asciidoc000066400000000000000000000004151506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/source-field.asciidoc:24 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "_source": { "enabled": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba07330ed3291b3970f4eb01dacd8086.asciidoc000066400000000000000000000042641506101734100264650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geodistance-aggregation.asciidoc:10 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": 1 } }, { "location": "POINT (4.912350 52.374081)", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (4.901618 52.369219)", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 
} }, { "location": "POINT (4.914722 52.371667)", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (4.405200 51.222900)", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (2.336389 48.861111)", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (2.327000 48.860000)", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( index="museums", size="0", aggs={ "rings_around_amsterdam": { "geo_distance": { "field": "location", "origin": "POINT (4.894 52.3760)", "ranges": [ { "to": 100000 }, { "from": 100000, "to": 300000 }, { "from": 300000 } ] } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ba0e7e0b18fc9ec6c623d40186d1f61b.asciidoc000066400000000000000000000004121506101734100266230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/resolve-cluster.asciidoc:271 [source, python] ---- resp = client.indices.resolve_cluster( name="not-present,clust*:my-index*,oldcluster:*", ignore_unavailable=False, timeout="5s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba10b644a4e9a2e7d78744ca607355d0.asciidoc000066400000000000000000000005461506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/put-follow.asciidoc:91 [source, python] ---- resp = client.ccr.follow( index=".ds-logs-mysql-default_copy-2022-01-01-000001", remote_cluster="remote_cluster", leader_index=".ds-logs-mysql-default-2022-01-01-000001", data_stream_name="logs-mysql-default_copy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba21a7fbb74180ff138d97032f28ace7.asciidoc000066400000000000000000000005711506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/update-user-profile-data.asciidoc:106 [source, python] ---- resp = client.security.update_user_profile_data( uid="u_P_0BMHgaOK3p7k-PFWUCbw9dQ-UFjt01oWJ_Dp2PmPc_0", labels={ "direction": "east" }, data={ "app1": { "theme": "default" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba3b9783aa188c6841e1926c5ab1472d.asciidoc000066400000000000000000000005061506101734100264170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:101 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "index1", "index2" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba5dc6fb9bbe1406714da5d641462a23.asciidoc000066400000000000000000000007751506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:96 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "strings_as_ip": { "match_mapping_type": "string", "match": "ip*", "runtime": { "type": "ip" } } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba6040de55afb2c8fb9e5b24bb038820.asciidoc000066400000000000000000000002701506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template-v1.asciidoc:94 [source, python] ---- resp = client.indices.get_template( name="temp*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba650046f9063f6c43d76f47e0f94403.asciidoc000066400000000000000000000012161506101734100262650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
mapping/types/date.asciidoc:244 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "date": { "type": "date" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "date": [ "2015-01-01T12:10:30Z", "2014-01-01T12:10:30Z" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/ba66768ed04f7b87906badff40ff40ed.asciidoc000066400000000000000000000006521506101734100267370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:153 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_primary_shard_size": "50gb" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba8c3578613ae0bf890f6a05706ce776.asciidoc000066400000000000000000000006551506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1024 [source, python] ---- resp = client.eql.search( index="my-data-stream", filter_path="-hits.events._source", query="\n process where process.name == \"regsvr32.exe\"\n ", fields=[ "event.type", "process.*", { "field": "@timestamp", "format": "epoch_millis" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ba9a5f66a6148612de0ad2491fd6c90d.asciidoc000066400000000000000000000013441506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/classic-tokenizer.asciidoc:148 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "classic", "max_token_length": 5 } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/baadbfffcd0c16f51eb3537f516dc3ed.asciidoc000066400000000000000000000003631506101734100272070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/disable-user-profile.asciidoc:65 [source, python] ---- resp = client.security.disable_user_profile( uid="u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bab4c3b22c1768fcc7153345e4096dfb.asciidoc000066400000000000000000000005101506101734100265440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc:79 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "keyword_repeat", "stemmer", "remove_duplicates" ], text="jumping dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb067c049331cc850a77b18bdfff81b5.asciidoc000066400000000000000000000021621506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1311 [source, python] ---- resp = client.indices.create( index="lithuanian_example", settings={ "analysis": { "filter": { "lithuanian_stop": { "type": "stop", "stopwords": "_lithuanian_" }, "lithuanian_keywords": { "type": "keyword_marker", "keywords": [ "pavyzdys" ] }, "lithuanian_stemmer": { "type": "stemmer", "language": "lithuanian" } }, "analyzer": { "rebuilt_lithuanian": { "tokenizer": "standard", "filter": [ "lowercase", 
"lithuanian_stop", "lithuanian_keywords", "lithuanian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb28d1f7f3f09f5061d7f4351aee89fc.asciidoc000066400000000000000000000007511506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:96 [source, python] ---- resp = client.security.put_role( name="test_role4", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "grant": [ "customer.*" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb293e1bdf0c6f6d9069eeb7edc9d399.asciidoc000066400000000000000000000003021506101734100270170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/disable-users.asciidoc:51 [source, python] ---- resp = client.security.disable_user( username="jacknich", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb2ba5d1885f87506f90dbb002e518f4.asciidoc000066400000000000000000000024301506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:604 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "query_string": { "query": "(information retrieval) OR (artificial intelligence)", "default_field": "text" } } } }, { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } } ], "rank_window_size": 10, "rank_constant": 1 } }, highlight={ "fields": { "text": { "fragment_size": 150, "number_of_fragments": 3 } } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb5a67e3d2d9cd3016e487e627769fe8.asciidoc000066400000000000000000000067331506101734100265370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:129 [source, python] ---- resp = client.bulk( index="cooking_blog", refresh="wait_for", operations=[ { "index": { "_id": "1" } }, { "title": "Perfect Pancakes: A Fluffy Breakfast Delight", "description": "Learn the secrets to making the fluffiest pancakes, so amazing you won't believe your tastebuds. This recipe uses buttermilk and a special folding technique to create light, airy pancakes that are perfect for lazy Sunday mornings.", "author": "Maria Rodriguez", "date": "2023-05-01", "category": "Breakfast", "tags": [ "pancakes", "breakfast", "easy recipes" ], "rating": 4.8 }, { "index": { "_id": "2" } }, { "title": "Spicy Thai Green Curry: A Vegetarian Adventure", "description": "Dive into the flavors of Thailand with this vibrant green curry. Packed with vegetables and aromatic herbs, this dish is both healthy and satisfying. Don't worry about the heat - you can easily adjust the spice level to your liking.", "author": "Liam Chen", "date": "2023-05-05", "category": "Main Course", "tags": [ "thai", "vegetarian", "curry", "spicy" ], "rating": 4.6 }, { "index": { "_id": "3" } }, { "title": "Classic Beef Stroganoff: A Creamy Comfort Food", "description": "Indulge in this rich and creamy beef stroganoff. Tender strips of beef in a savory mushroom sauce, served over a bed of egg noodles. 
It's the ultimate comfort food for chilly evenings.", "author": "Emma Watson", "date": "2023-05-10", "category": "Main Course", "tags": [ "beef", "pasta", "comfort food" ], "rating": 4.7 }, { "index": { "_id": "4" } }, { "title": "Vegan Chocolate Avocado Mousse", "description": "Discover the magic of avocado in this rich, vegan chocolate mousse. Creamy, indulgent, and secretly healthy, it's the perfect guilt-free dessert for chocolate lovers.", "author": "Alex Green", "date": "2023-05-15", "category": "Dessert", "tags": [ "vegan", "chocolate", "avocado", "healthy dessert" ], "rating": 4.5 }, { "index": { "_id": "5" } }, { "title": "Crispy Oven-Fried Chicken", "description": "Get that perfect crunch without the deep fryer! This oven-fried chicken recipe delivers crispy, juicy results every time. A healthier take on the classic comfort food.", "author": "Maria Rodriguez", "date": "2023-05-20", "category": "Main Course", "tags": [ "chicken", "oven-fried", "healthy" ], "rating": 4.9 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb64a7228a479f6aeeaccaf7560e11ee.asciidoc000066400000000000000000000013711506101734100267770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:394 [source, python] ---- resp = client.transform.put_transform( transform_id="last-log-from-clientip", source={ "index": [ "kibana_sample_data_logs" ] }, latest={ "unique_key": [ "clientip" ], "sort": "timestamp" }, frequency="1m", dest={ "index": "last-log-from-clientip" }, sync={ "time": { "field": "timestamp", "delay": "60s" } }, retention_policy={ "time": { "field": "timestamp", "max_age": "30d" } }, settings={ "max_page_search_size": 500 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb792e64a4c1f872296073b457aa03c8.asciidoc000066400000000000000000000003521506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:366 [source, python] ---- resp = client.snapshot.delete( repository="my_repository", snapshot="my_snapshot_2099.05.06", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb975b342de7e838ebf6a36aaa1a8749.asciidoc000066400000000000000000000005631506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:477 [source, python] ---- resp = client.index( index="my-index-000001", id="3", routing="1", refresh=True, document={ "text": "This is a vote", "my_join_field": { "name": "vote", "parent": "2" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bb9e268ec62d19ca2a6366cbb48fae68.asciidoc000066400000000000000000000002231506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/count.asciidoc:95 [source, python] ---- resp = client.cat.count( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bc01aee2ab2ce1690986374bd836e1c7.asciidoc000066400000000000000000000006211506101734100265510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:317 [source, python] ---- resp = client.search( index="cooking_blog", query={ "multi_match": { "query": "vegetarian curry", "fields": [ "title", "description", "tags" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bc4d308069af23929a49d856f6bc3008.asciidoc000066400000000000000000000014061506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
aggregations/bucket/geodistance-aggregation.asciidoc:122 [source, python] ---- resp = client.search( index="museums", size="0", aggs={ "rings": { "geo_distance": { "field": "location", "origin": "POINT (4.894 52.3760)", "unit": "km", "distance_type": "plane", "ranges": [ { "to": 100 }, { "from": 100, "to": 300 }, { "from": 300 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bcae0f00ae1e6f08fa395ca741fe84f9.asciidoc000066400000000000000000000007531506101734100270020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rank-eval.asciidoc:403 [source, python] ---- resp = client.rank_eval( index="my-index-000001", requests=[ { "id": "JFK query", "request": { "query": { "match_all": {} } }, "ratings": [] } ], metric={ "dcg": { "k": 20, "normalize": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bcb572658986d69ae17c28ddd7e4bfd8.asciidoc000066400000000000000000000003051506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/field-usage-stats.asciidoc:172 [source, python] ---- resp = client.indices.field_usage_stats( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bcbd4d4749126837723438ff4faeb0f6.asciidoc000066400000000000000000000006061506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:192 [source, python] ---- resp = client.search( index="my-index-000001", filter_path="aggregations", size=0, aggs={ "top_values": { "terms": { "field": "my-field", "size": 10 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bcc75fc01b45e482638c65b8fbdf09fa.asciidoc000066400000000000000000000002511506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:419 [source, python] ---- resp = client.search( index="books", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bccd4eb26b1a325d103b12e198a13c08.asciidoc000066400000000000000000000003731506101734100265220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/slowlog.asciidoc:102 [source, python] ---- resp = client.indices.get_settings( index="_all", expand_wildcards="all", filter_path="*.settings.index.*.slowlog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bcd1afb793240b1dddd9fa5d3f21192b.asciidoc000066400000000000000000000004031506101734100267550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:315 [source, python] ---- resp = client.update( index="test", id="1", doc={ "product_price": 100 }, upsert={ "product_price": 50 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bcdfaa4487747249699a86a0dcd22f5e.asciidoc000066400000000000000000000013201506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/simulate-ingest.asciidoc:352 [source, python] ---- resp = client.simulate.ingest( docs=[ { "_index": "my-index", "_id": "123", "_source": { "foo": "bar" } }, { "_index": "my-index", "_id": "456", "_source": { "foo": "rab" } } ], pipeline_substitutions={ "my-pipeline": { "processors": [ { "uppercase": { "field": "foo" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd0d30a7683037e1ebadd163514765d4.asciidoc000066400000000000000000000011321506101734100263770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
security/authentication/configuring-active-directory-realm.asciidoc:192 [source, python] ---- resp = client.security.put_role_mapping( name="basic_users", roles=[ "user" ], rules={ "any": [ { "field": { "groups": "cn=users,dc=example,dc=com" } }, { "field": { "dn": "cn=John Doe,cn=contractors,dc=example,dc=com" } } ] }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd1e55b8cb2ca9e496e223e717d76640.asciidoc000066400000000000000000000011241506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-polygon-query.asciidoc:93 [source, python] ---- resp = client.search( query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_polygon": { "person.location": { "points": [ "40, -70", "30, -80", "20, -90" ] } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd23c3a03907b1238dcb07ab9eecae7b.asciidoc000066400000000000000000000003151506101734100267520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:367 [source, python] ---- resp = client.update_by_query( index="my-index-000001", scroll_size="100", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd298b11933605c641626750c981d70b.asciidoc000066400000000000000000000015621506101734100261230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/simulate-multi-component-templates.asciidoc:50 [source, python] ---- resp = client.cluster.put_component_template( name="ct1", template={ "settings": { "index.number_of_shards": 2 } }, ) print(resp) resp1 = client.cluster.put_component_template( name="ct2", template={ "settings": { "index.number_of_replicas": 0 }, "mappings": { "properties": { "@timestamp": { "type": "date" } } } }, ) print(resp1) resp2 = client.indices.simulate_template( index_patterns=[ "my*" ], template={ "settings": { "index.number_of_shards": 3 } }, composed_of=[ "ct1", "ct2" ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/bd2a387e8c21bf01a1039e81d7602921.asciidoc000066400000000000000000000007541506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:788 [source, python] ---- resp = client.put_script( id="my-search-template", script={ "lang": "mustache", "source": { "query": { "multi_match": { "query": "{{query_string}}", "fields": "[{{#text_fields}}{{user_name}},{{/text_fields}}]" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd3d710ec50a151453e141691163af72.asciidoc000066400000000000000000000002451506101734100262320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:276 [source, python] ---- resp = client.tasks.list( group_by="parents", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd458073196a19ecdeb24a8016488c20.asciidoc000066400000000000000000000003151506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/delete-index-template.asciidoc:32 [source, python] ---- resp = client.indices.delete_index_template( name="my-index-template", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd57976bc93ca64b2d3e001df9f06c82.asciidoc000066400000000000000000000003311506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/resolve.asciidoc:107 [source, python] ---- resp = client.indices.resolve_index( name="f*,remoteCluster1:bar*", expand_wildcards="all", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/bd5bd5d8b3d81241335fe1e5747080ac.asciidoc000066400000000000000000000006741506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/error-handling.asciidoc:122 [source, python] ---- resp = client.ilm.put_lifecycle( name="shrink-index", policy={ "phases": { "warm": { "min_age": "5d", "actions": { "shrink": { "number_of_shards": 1 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd68666ca2e0be12f7624016317a62bc.asciidoc000066400000000000000000000004001506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-stats.asciidoc:2573 [source, python] ---- resp = client.nodes.stats( groups="_all", ) print(resp) resp1 = client.nodes.stats( metric="indices", groups="foo,bar", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/bd6f30e3caa3632260da42d9ff82c98c.asciidoc000066400000000000000000000003031506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-api-key-cache.asciidoc:63 [source, python] ---- resp = client.security.clear_api_key_cache( ids="*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd7330af2609bdd8aa10958f5e640b93.asciidoc000066400000000000000000000005131506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:649 [source, python] ---- resp = client.index( index="my_queries2", id="2", refresh=True, document={ "query": { "match": { "my_field.suffix": "xyz" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd767ea03171fe71c73f58f16d5da92f.asciidoc000066400000000000000000000004471506101734100265760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:273 [source, python] ---- resp = client.search( index="file-path-test", query={ "match": { "file_path": "/User/bob/photos/2017/05" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd7a1417fc27b5a801334ec44462b376.asciidoc000066400000000000000000000002371506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/datafeeds.asciidoc:130 [source, python] ---- resp = client.cat.ml_datafeeds( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bd7fa2f122ab861cd00e0b9154d120b3.asciidoc000066400000000000000000000007061506101734100265210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/common-script-uses.asciidoc:29 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "@timestamp": { "format": "strict_date_optional_time||epoch_second", "type": "date" }, "message": { "type": "wildcard" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bdaf00d791706d7fde25fd65d3735b94.asciidoc000066400000000000000000000012261506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/keyword.asciidoc:184 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "kwd": { "type": "keyword" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "kwd": [ "foo", "foo", "bar", "baz" ] }, ) print(resp1) ---- 
python-elasticsearch-9.1.1/docs/examples/bdb30dd52d32f50994008f4f9c0da5f0.asciidoc000066400000000000000000000003561506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:571 [source, python] ---- resp = client.update_by_query_rethrottle( task_id="r1A2WoRbTwKZ516z6NEs5A:36619", requests_per_second="-1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bdc1afd2181154bb78797360f9dbb1a0.asciidoc000066400000000000000000000004301506101734100265420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/ack-watch.asciidoc:140 [source, python] ---- resp = client.watcher.execute_watch( id="my_watch", record_execution=True, ) print(resp) resp1 = client.watcher.get_watch( id="my_watch", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/bdc55256fa5f701680631a149dbb75a9.asciidoc000066400000000000000000000007041506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:420 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "sales_by_category": { "terms": { "field": "category.keyword", "size": 5, "order": { "_count": "desc" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bdc68012c121062628d6d73468bf4866.asciidoc000066400000000000000000000003171506101734100262020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/register-repository.asciidoc:215 [source, python] ---- resp = client.snapshot.cleanup_repository( name="my_repository", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bdd28276618235487ac96bd6679bc206.asciidoc000066400000000000000000000013501506101734100262760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/aggs-tutorial.asciidoc:1770 [source, python] ---- resp = client.search( index="kibana_sample_data_ecommerce", size=0, aggs={ "daily_sales": { "date_histogram": { "field": "order_date", "calendar_interval": "day" }, "aggs": { "revenue": { "sum": { "field": "taxful_total_price" } }, "cumulative_revenue": { "cumulative_sum": { "buckets_path": "revenue" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bde74dbbcef8ebf8541cae2c1711255f.asciidoc000066400000000000000000000003141506101734100270510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search-application/apis/get-search-application.asciidoc:93 [source, python] ---- resp = client.search_application.get( name="my-app", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bdfb86cdfffb9d2ee6e3d399f00a57b0.asciidoc000066400000000000000000000017371506101734100271620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/top-metrics-aggregation.asciidoc:499 [source, python] ---- resp = client.search( index="test*", filter_path="aggregations", aggs={ "ip": { "terms": { "field": "ip" }, "aggs": { "tm": { "top_metrics": { "metrics": { "field": "m" }, "sort": { "s": "desc" }, "size": 1 } }, "having_tm": { "bucket_selector": { "buckets_path": { "top_m": "tm[m]" }, "script": "params.top_m < 1000" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be285eef1d2df0dfcf876e2d4b361f1e.asciidoc000066400000000000000000000015521506101734100270670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/common-grams-tokenfilter.asciidoc:206 [source, python] ---- resp = 
client.indices.create( index="common_grams_example", settings={ "analysis": { "analyzer": { "index_grams": { "tokenizer": "whitespace", "filter": [ "common_grams_query" ] } }, "filter": { "common_grams_query": { "type": "common_grams", "common_words": [ "a", "is", "the" ], "ignore_case": True, "query_mode": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be3a6431d01846950dc1a39a7a6a1faa.asciidoc000066400000000000000000000002771506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:532 [source, python] ---- resp = client.tasks.get( task_id="r1A2WoRbTwKZ516z6NEs5A:36619", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be5b415d7f33d6f0397ac2f8b5c10521.asciidoc000066400000000000000000000004361506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:647 [source, python] ---- resp = client.update_by_query( index="my-index-000001", refresh=True, slices="5", script={ "source": "ctx._source['extra'] = 'test'" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be5c5a9c25901737585e4fff9195da3c.asciidoc000066400000000000000000000006571506101734100265310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:435 [source, python] ---- resp = client.search( index="my-bit-vectors", filter_path="hits.hits", query={ "knn": { "query_vector": [ 127, -127, 0, 1, 42 ], "field": "my_vector" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be5d62e7c8f63687c585305fbe70d7d0.asciidoc000066400000000000000000000006521506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-aggregation.asciidoc:288 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_outlier": { "percentiles": { "field": "load_time", "tdigest": { "compression": 200 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be5fef0640c3a650ee96f84e3376a1be.asciidoc000066400000000000000000000007101506101734100266410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:335 [source, python] ---- resp = client.update( index="test", id="1", scripted_upsert=True, script={ "source": "\n if ( ctx.op == 'create' ) {\n ctx._source.counter = params.count\n } else {\n ctx._source.counter += params.count\n }\n ", "params": { "count": 4 } }, upsert={}, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be6b0bfcdce1ef100af89f74da5d4748.asciidoc000066400000000000000000000005731506101734100270640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/put-trained-model-definition-part.asciidoc:70 [source, python] ---- resp = client.ml.put_trained_model_definition_part( model_id="elastic__distilbert-base-uncased-finetuned-conll03-english", part="0", definition="...", total_definition_length=265632637, total_parts=64, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be9376b1e354ad9c6bdad83f6a0ce5ad.asciidoc000066400000000000000000000030311506101734100270530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:129 [source, python] ---- resp = client.transform.preview_transform( source={ "index": "kibana_sample_data_flights", "query": { "bool": { "filter": [ { "term": { "Cancelled": False } } ] } } }, dest={ "index": "sample_flight_delays_by_carrier" }, pivot={ 
"group_by": { "carrier": { "terms": { "field": "Carrier" } } }, "aggregations": { "flights_count": { "value_count": { "field": "FlightNum" } }, "delay_mins_total": { "sum": { "field": "FlightDelayMin" } }, "flight_mins_total": { "sum": { "field": "FlightTimeMin" } }, "delay_time_percentage": { "bucket_script": { "buckets_path": { "delay_time": "delay_mins_total.value", "flight_time": "flight_mins_total.value" }, "script": "(params.delay_time / params.flight_time) * 100" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/be9836fe55c5fada404a2adc1663d832.asciidoc000066400000000000000000000012021506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1435 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", runtime={ "http": { "type": "composite", "script": "emit(grok(\"%{COMMONAPACHELOG}\").extract(doc[\"message\"].value))", "fields": { "clientip": { "type": "ip" }, "verb": { "type": "keyword" }, "response": { "type": "long" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/beaf43b274b0f32cf3cf48f59e5cb1f2.asciidoc000066400000000000000000000004371506101734100270000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:751 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="snapshot_*", sort="start_time", from_sort_value="1577833200000", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/beb0b9ff4f68672273fcff1b7bae706b.asciidoc000066400000000000000000000004751506101734100270150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:411 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "user_identifier": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/beba2a9795c8a13653e1edf64eec4357.asciidoc000066400000000000000000000005041506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/filtering.asciidoc:74 [source, python] ---- resp = client.indices.put_settings( index="test", settings={ "index.routing.allocation.require.size": "big", "index.routing.allocation.require.rack": "rack1" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bed14cc152522ca0726ac3746ebc31db.asciidoc000066400000000000000000000014331506101734100266050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/unsigned_long.asciidoc:31 [source, python] ---- resp = client.bulk( index="my_index", refresh=True, operations=[ { "index": { "_id": 1 } }, { "my_counter": 0 }, { "index": { "_id": 2 } }, { "my_counter": 9223372036854776000 }, { "index": { "_id": 3 } }, { "my_counter": 18446744073709552000 }, { "index": { "_id": 4 } }, { "my_counter": 18446744073709552000 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/befa73a8a419fcf3b7798548b54a20bf.asciidoc000066400000000000000000000022661506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:1146 [source, python] ---- resp = client.search( index="my-index", size=10, knn={ "query_vector": [ 0.04283529, 0.85670587, -0.51402352, 0 ], "field": "my_int4_vector", "k": 20, "num_candidates": 50 }, rescore={ "window_size": 20, "query": { "rescore_query": { "script_score": { "query": { "match_all": {} }, "script": { 
"source": "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", "params": { "queryVector": [ 0.04283529, 0.85670587, -0.51402352, 0 ] } } } }, "query_weight": 0, "rescore_query_weight": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf17440ac178d2ef5f5be643d033920b.asciidoc000066400000000000000000000005411506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:138 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "my-index", "pipeline": "elser-v2-test" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf1de9fa1b825fa875d27fa08821a6d1.asciidoc000066400000000000000000000003741506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-across-clusters.asciidoc:119 [source, python] ---- resp = client.security.put_user( username="remote_user", password="", roles=[ "remote1" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf2e6ea2bae621b9b2fee7003e891f86.asciidoc000066400000000000000000000004741506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/recipes/stemming.asciidoc:58 [source, python] ---- resp = client.search( index="index", query={ "simple_query_string": { "fields": [ "body" ], "query": "ski" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf3c3bc41c593a80faebef1df353e483.asciidoc000066400000000000000000000007751506101734100270020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-jinaai.asciidoc:169 [source, python] ---- resp = client.inference.put( task_type="rerank", inference_id="jinaai-rerank", inference_config={ "service": "jinaai", "service_settings": { "api_key": "", "model_id": "jina-reranker-v2-base-multilingual" }, "task_settings": { "top_n": 10, "return_documents": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf3f520b47581d861e802730aaf2a519.asciidoc000066400000000000000000000004571506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:35 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": "logs-nginx.access-prod", "alias": "logs" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf448c3889c18266e2e6d3af4f614da2.asciidoc000066400000000000000000000006761506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:336 [source, python] ---- resp = client.index( index=".ds-my-data-stream-2099-03-08-000003", id="bfspvnIBr7VVZlfp2lqX", if_seq_no="0", if_primary_term="1", document={ "@timestamp": "2099-03-08T11:06:07.000Z", "user": { "id": "8a4f500d" }, "message": "Login successful" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf639275d0818be04317ee5ab6075da6.asciidoc000066400000000000000000000007011506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/has-parent-query.asciidoc:52 [source, python] ---- resp = client.search( index="my-index-000001", query={ "has_parent": { "parent_type": "parent", "query": { "term": { "tag": { "value": "Elasticsearch" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bf8680d940c84e43a9483a25548dea57.asciidoc000066400000000000000000000024721506101734100263650ustar00rootroot00000000000000// 
This file is autogenerated, DO NOT EDIT // mapping/params/search-analyzer.asciidoc:16 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "filter": { "autocomplete_filter": { "type": "edge_ngram", "min_gram": 1, "max_gram": 20 } }, "analyzer": { "autocomplete": { "type": "custom", "tokenizer": "standard", "filter": [ "lowercase", "autocomplete_filter" ] } } } }, mappings={ "properties": { "text": { "type": "text", "analyzer": "autocomplete", "search_analyzer": "standard" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "text": "Quick Brown Fox" }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "match": { "text": { "query": "Quick Br", "operator": "and" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/bf9f13dc6c24cc225a72e32177e9ee02.asciidoc000066400000000000000000000034651506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:17 [source, python] ---- resp = client.indices.create( index="my_locations", mappings={ "properties": { "pin": { "properties": { "location": { "type": "geo_point" } } } } }, ) print(resp) resp1 = client.index( index="my_locations", id="1", document={ "pin": { "location": { "lat": 40.12, "lon": -71.34 } } }, ) print(resp1) resp2 = client.indices.create( index="my_geoshapes", mappings={ "properties": { "pin": { "properties": { "location": { "type": "geo_shape" } } } } }, ) print(resp2) resp3 = client.index( index="my_geoshapes", id="1", document={ "pin": { "location": { "type": "polygon", "coordinates": [ [ [ 13, 51.5 ], [ 15, 51.5 ], [ 15, 54 ], [ 13, 54 ], [ 13, 51.5 ] ] ] } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/bfb0db2a72f22c9c2046119777efbb43.asciidoc000066400000000000000000000007141506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-search.asciidoc:78 [source, python] ---- resp = client.search( index="elser-embeddings", query={ "sparse_vector": { "field": "content_embedding", "inference_id": "elser_embeddings", "query": "How to avoid muscle soreness after running?" 
} }, source=[ "id", "content" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bfb1aa83da8e3f414d50b5ed7894ed33.asciidoc000066400000000000000000000006631506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:165 [source, python] ---- resp = client.search( index="my-index-000001", script_fields={ "my_doubled_field": { "script": { "source": "field('my_field').get(null) * params['multiplier']", "params": { "multiplier": 2 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bfb8a15cd05b43094ffbce8078bad3e1.asciidoc000066400000000000000000000003431506101734100267660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:357 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="snapshot_2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bfd6fa3f44e6165f8999102f5a8e24d6.asciidoc000066400000000000000000000007451506101734100265320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:41 [source, python] ---- resp = client.search( index="index1", query={ "query_string": { "query": "running with scissors", "fields": [ "comment", "comment.english" ] } }, highlight={ "order": "score", "fields": { "comment": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/bfdad8a928ea30d7cf60d0a0a6bc6e2e.asciidoc000066400000000000000000000014671506101734100271310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/bulk.asciidoc:721 [source, python] ---- resp = client.bulk( filter_path="items.*.error", operations=[ { "update": { "_id": "5", "_index": "index1" } }, { "doc": { "my_field": "baz" } }, { "update": { "_id": "6", "_index": "index1" } }, { "doc": { "my_field": "baz" } }, { "update": { "_id": "7", "_index": "index1" } }, { "doc": { "my_field": "baz" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c00c9412609832ebceb9e786dd9542df.asciidoc000066400000000000000000000004571506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-name-description-api.asciidoc:85 [source, python] ---- resp = client.connector.update_name( connector_id="my-connector", name="Custom connector", description="This is my customized connector", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c012f42b26eb8dd9b197644c3ed954cf.asciidoc000066400000000000000000000006421506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:400 [source, python] ---- resp = client.index( index="my-index-000001", id="2", document={ "name": { "first": "Paul", "last": "McCartney", "title": { "value": "Sir", "category": "order of chivalry" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c03ce952de42eae4b522cedc9fd3d14a.asciidoc000066400000000000000000000005161506101734100270460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:269 [source, python] ---- resp = client.index( index="example", document={ "location": "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2))" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/c065a200c00e2005d88ec2f0c10c908a.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/shingle-tokenfilter.asciidoc:31

[source, python]
----
resp = client.indices.analyze(
    tokenizer="whitespace",
    filter=["shingle"],
    text="quick brown fox jumps",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c067182d385f59ce5952fb9a716fbf05.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/post-calendar-event.asciidoc:85

[source, python]
----
resp = client.ml.post_calendar_events(
    calendar_id="planned-outages",
    events=[
        {"description": "event 1", "start_time": 1513641600000, "end_time": 1513728000000},
        {"description": "event 2", "start_time": 1513814400000, "end_time": 1513900800000},
        {"description": "event 3", "start_time": 1514160000000, "end_time": 1514246400000}
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c088ce5291ae28650b6091cdec489398.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/point-in-time-api.asciidoc:55

[source, python]
----
resp = client.search(
    size=100,
    query={"match": {"title": "elasticsearch"}},
    pit={
        "id": "46ToAwMDaWR5BXV1aWQyKwZub2RlXzMAAAAAAAAAACoBYwADaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQADaWR5BXV1aWQyKgZub2RlXzIAAAAAAAAAAAwBYgACBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==",
        "keep_alive": "1m"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c0a4b0c1c6eff14da8b152ceb19c1c31.asciidoc
// This file is autogenerated, DO NOT EDIT
// setup/restart-cluster.asciidoc:93

[source, python]
----
resp = client.cat.health()
print(resp)

resp1 = client.cat.nodes()
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/c0c638e3d218b0ecbe5c4d77c964ae9e.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/term-query.asciidoc:28

[source, python]
----
resp = client.search(
    query={"term": {"user.id": {"value": "kimchy", "boost": 1}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c0ddfb2e6315f5bcf0d3ef414b5bbed3.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/apis/update-connector-configuration-api.asciidoc:342

[source, python]
----
resp = client.connector.update_configuration(
    connector_id="my-spo-connector",
    values={"secret_value": "foo-bar"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c0ebaa33e750b87555dc352073f692e8.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/update-settings.asciidoc:187

[source, python]
----
resp = client.indices.close(
    index="my-index-000001",
)
print(resp)

resp1 = client.indices.put_settings(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "content": {"type": "custom", "tokenizer": "whitespace"}
            }
        }
    },
)
print(resp1)

resp2 = client.indices.open(
    index="my-index-000001",
)
print(resp2)
----
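Because non-dynamic analysis settings can only be changed while the index is closed, the close/update/open sequence above is a natural fit for try/finally in application code. A hedged sketch that makes sure the index is reopened even if the settings update fails:

[source, python]
----
client.indices.close(index="my-index-000001")
try:
    client.indices.put_settings(
        index="my-index-000001",
        settings={"analysis": {"analyzer": {"content": {"type": "custom", "tokenizer": "whitespace"}}}},
    )
finally:
    # reopen regardless of whether the update succeeded
    client.indices.open(index="my-index-000001")
----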
autogenerated, DO NOT EDIT
// docs/get.asciidoc:303

[source, python]
----
resp = client.get_source(
    index="my-index-000001",
    id="1",
    source_includes="*.id",
    source_excludes="entities",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c10a486a28cbc5b2f15c3474ae31a431.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/take-snapshot.asciidoc:187

[source, python]
----
resp = client.slm.put_lifecycle(
    policy_id="nightly-snapshots",
    schedule="0 30 1 * * ?",
    name="<nightly-snap-{now/d}>",
    repository="my_repository",
    config={"indices": "*", "include_global_state": True},
    retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c11c4d6b30e882871bf0074f407149bd.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/parent-id-query.asciidoc:47

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    refresh=True,
    document={
        "text": "This is a parent document.",
        "my-join-field": "my-parent"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c12d6e962f083c728f9397932f05202e.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/apis/list-connector-sync-jobs-api.asciidoc:78

[source, python]
----
resp = client.perform_request(
    "GET",
    "/_connector/_sync_job",
    params={"connector_id": "connector-1"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c1409f591a01589638d9b00436ce42c0.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/clear-cache.asciidoc:67

[source, python]
----
resp = client.security.clear_cached_realms(
    realms="default_file",
    usernames="rdeniro,alpacino",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c147de68fd6da032ad4a3c1bf626f5d6.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/highlighting.asciidoc:422

[source, python]
----
resp = client.search(
    query={"match": {"user.id": "kimchy"}},
    highlight={"fields": {"comment": {"type": "plain"}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c155d2670ff82b135c7dcec0fc8a3f23.asciidoc
// This file is autogenerated, DO NOT EDIT
// eql/eql.asciidoc:1378

[source, python]
----
resp = client.eql.delete(
    id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c18100d62ed31bc9e05f62900156e6a8.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/apis/list-connectors-api.asciidoc:102

[source, python]
----
resp = client.connector.list(
    index_name="search-google-drive",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c186ecf6f799ddff7add1abdecea5821.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/fields.asciidoc:287

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "full_name": {"type": "text", "store": True},
            "title": {"type": "text", "store": True}
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    refresh=True,
    document={"full_name": "Alice Ball", "title": "Professor"},
)
print(resp1)

resp2 = client.search(
    index="my-index-000001",
    script_fields={
        "name_with_title": {
            "script": {
                "lang": "painless",
                "source": "params._fields['title'].value + ' ' + params._fields['full_name'].value"
            }
        }
    },
)
print(resp2)
----
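Since `full_name` and `title` are stored fields in the example above, the same values can also be fetched without a script at all; a sketch using the search API's `stored_fields` parameter (the match-all query is illustrative):

[source, python]
----
resp = client.search(
    index="my-index-000001",
    stored_fields=["title", "full_name"],
    query={"match_all": {}},
)
print(resp)
----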
"text", "store": True } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", refresh=True, document={ "full_name": "Alice Ball", "title": "Professor" }, ) print(resp1) resp2 = client.search( index="my-index-000001", script_fields={ "name_with_title": { "script": { "lang": "painless", "source": "params._fields['title'].value + ' ' + params._fields['full_name'].value" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/c187b52646cedeebe0716327add65642.asciidoc000066400000000000000000000004051506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/apis/get-async-sql-search-api.asciidoc:18 [source, python] ---- resp = client.sql.get_async( id="FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI=", format="json", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c1a39c2628ada04c3ddd61a303b65d44.asciidoc000066400000000000000000000014321506101734100265270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:200 [source, python] ---- resp = client.search( index="my-index-000001", query={ "script_score": { "query": { "bool": { "filter": { "term": { "status": "published" } } } }, "script": { "source": "(24 - hamming(params.queryVector, 'my_byte_dense_vector')) / 24", "params": { "queryVector": [ 4, 3, 0 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c1a895497066a3dac674d4b1a119048d.asciidoc000066400000000000000000000004331506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/term-query.asciidoc:137 [source, python] ---- resp = client.search( index="my-index-000001", pretty=True, query={ "term": { "full_text": "Quick Brown Foxes!" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c1ac9e53b04f7acee4b4933969d6b574.asciidoc000066400000000000000000000012021506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/preview-transform.asciidoc:296 [source, python] ---- resp = client.transform.preview_transform( source={ "index": "kibana_sample_data_ecommerce" }, pivot={ "group_by": { "customer_id": { "terms": { "field": "customer_id", "missing_bucket": True } } }, "aggregations": { "max_price": { "max": { "field": "taxful_total_price" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c1ad9ff64728a5bfeeb485e60ec694a1.asciidoc000066400000000000000000000010041506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rank-eval.asciidoc:459 [source, python] ---- resp = client.rank_eval( index="my-index-000001", requests=[ { "id": "JFK query", "request": { "query": { "match_all": {} } }, "ratings": [] } ], metric={ "expected_reciprocal_rank": { "maximum_relevance": 3, "k": 20 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c1efc5cfcb3c29711bfe118f1baa28b0.asciidoc000066400000000000000000000006451506101734100270400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/keyword-analyzer.asciidoc:71 [source, python] ---- resp = client.indices.create( index="keyword_example", settings={ "analysis": { "analyzer": { "rebuilt_keyword": { "tokenizer": "keyword", "filter": [] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc000066400000000000000000000002751506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
python-elasticsearch-9.1.1/docs/examples/c208a06212dc0cf6ac413d4f2c154296.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/flush.asciidoc:137

[source, python]
----
resp = client.indices.flush(
    index="my-index-000001,my-index-000002",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c208de54369379e8d78ab201be18b6be.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/dynamic/templates.asciidoc:234

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "dynamic_templates": [
            {
                "longs_as_strings": {
                    "match_mapping_type": "string",
                    "match": "long_*",
                    "unmatch": "*_text",
                    "mapping": {"type": "long"}
                }
            }
        ]
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={"long_num": "5", "long_text": "foo"},
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/c21aaedb5752a83489476fa3b5e2e9ff.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-rules/apis/put-query-rule.asciidoc:120

[source, python]
----
resp = client.query_rules.put_rule(
    ruleset_id="my-ruleset",
    rule_id="my-rule1",
    type="pinned",
    criteria=[
        {"type": "contains", "metadata": "user_query", "values": ["pugs", "puggles"]},
        {"type": "exact", "metadata": "user_country", "values": ["us"]}
    ],
    actions={"ids": ["id1", "id2"]},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c21eb4bc30087188241cbba6b6b89999.asciidoc
// This file is autogenerated, DO NOT EDIT
// connector/apis/update-connector-service-type-api.asciidoc:84

[source, python]
----
resp = client.connector.update_service_type(
    connector_id="my-connector",
    service_type="sharepoint_online",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c23e32775340d7bc6f46820313014d8a.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/using.asciidoc:525

[source, python]
----
resp = client.index(
    index="my_test_scores_2",
    pipeline="my_test_scores_pipeline",
    document={
        "student": "kimchy",
        "grad_year": "2099",
        "math_score": 1200,
        "verbal_score": 800
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c267e90b7873a7c8c8af06f01e958e69.asciidoc
// This file is autogenerated, DO NOT EDIT
// ccr/bi-directional-disaster-recovery.asciidoc:185

[source, python]
----
resp = client.search(
    index="logs*",
    size="0",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c26b185952ddf9842e18493aca2de147.asciidoc
// This file is autogenerated, DO NOT EDIT
// quickstart/getting-started.asciidoc:102

[source, python]
----
resp = client.index(
    index="books",
    document={
        "name": "Snow Crash",
        "author": "Neal Stephenson",
        "release_date": "1992-06-01",
        "page_count": 470
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c27b7d9836aa4ea756f59e9c42911721.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/scroll-api.asciidoc:35

[source, python]
----
resp = client.scroll(
    scroll_id="DXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAD4WYm9laVYtZndUQlNsdDcwakFMNjU1QQ==",
)
print(resp)
----
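The scroll ID above comes from an earlier search. End to end, a scroll is opened by a search that passes a `scroll` keep-alive, continued with `client.scroll`, and cleaned up with `client.clear_scroll`; a hedged sketch of that loop (index name and page size are illustrative):

[source, python]
----
resp = client.search(index="my-index-000001", scroll="1m", size=1000, query={"match_all": {}})
while resp["hits"]["hits"]:
    # process resp["hits"]["hits"] here, then fetch the next page
    resp = client.scroll(scroll_id=resp["_scroll_id"], scroll="1m")
# release server-side resources once the scroll is exhausted
client.clear_scroll(scroll_id=resp["_scroll_id"])
----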
python-elasticsearch-9.1.1/docs/examples/c28f0b0dd3246cb91d6facb3295a61d7.asciidoc
// This file is autogenerated, DO NOT EDIT
// tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:409

[source, python]
----
resp = client.indices.close(
    index="kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c2c21e2824fbf6b7198ede30419da82b.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/paginate-search-results.asciidoc:529

[source, python]
----
resp = client.clear_scroll(
    scroll_id="_all",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c2d7c36daac8608d2515c549b2c82436.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/composite-aggregation.asciidoc:491

[source, python]
----
resp = client.search(
    size=0,
    aggs={
        "my_buckets": {
            "composite": {
                "sources": [
                    {
                        "tile": {
                            "geotile_grid": {
                                "field": "location",
                                "precision": 22,
                                "bounds": {
                                    "top_left": "POINT (4.9 52.4)",
                                    "bottom_right": "POINT (5.0 52.3)"
                                }
                            }
                        }
                    }
                ]
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c318fde926842722825a51e5c9c326a9.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/trim-tokenfilter.asciidoc:34

[source, python]
----
resp = client.indices.analyze(
    tokenizer="keyword",
    text=" fox ",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c38c882c642dd412e8fa4c3eed49d12f.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/search-as-you-type.asciidoc:162

[source, python]
----
resp = client.search(
    index="my-index-000001",
    query={"match_phrase_prefix": {"my_field": "brown f"}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c3b77e11b16e37e9e37e28dec922432e.asciidoc
// This file is autogenerated, DO NOT EDIT
// esql/esql-syntax.asciidoc:187

[source, python]
----
resp = client.esql.query(
    query="\nFROM library\n| WHERE match(author, \"Frank Herbert\", {\"minimum_should_match\": 2, \"operator\": \"AND\"})\n| LIMIT 5\n",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c4272ad0309ffbcbe9ce96bf9fb4352a.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/suggesters/context-suggest.asciidoc:140

[source, python]
----
resp = client.search(
    index="place",
    pretty=True,
    suggest={
        "place_suggestion": {
            "prefix": "tim",
            "completion": {
                "field": "suggest",
                "size": 10,
                "contexts": {"place_type": ["cafe", "restaurants"]}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c42bc6e74afc3d43cd032ec2bfd77385.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc:58

[source, python]
----
resp = client.indices.analyze(
    tokenizer="keyword",
    filter=["word_delimiter"],
    text="Neil's-Super-Duper-XL500--42+AutoCoder",
)
print(resp)
----
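Elastic's documentation generally recommends the newer `word_delimiter_graph` filter over `word_delimiter`, since it produces graph token streams that behave correctly with phrase queries; the same analyze request with the graph variant would look like this:

[source, python]
----
resp = client.indices.analyze(
    tokenizer="keyword",
    filter=["word_delimiter_graph"],
    text="Neil's-Super-Duper-XL500--42+AutoCoder",
)
print(resp)
----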
python-elasticsearch-9.1.1/docs/examples/c4607ca79b2bcde39305d6f4f21cad37.asciidoc
// This file is autogenerated, DO NOT EDIT
// esql/esql-rest.asciidoc:226

[source, python]
----
resp = client.esql.query(
    locale="fr-FR",
    query="\n ROW birth_date_string = \"2023-01-15T00:00:00.000Z\"\n | EVAL birth_date = date_parse(birth_date_string)\n | EVAL month_of_birth = DATE_FORMAT(\"MMMM\",birth_date)\n | LIMIT 5\n ",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c464ed2001d66a1446f37659dc9efc2a.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/daterange-aggregation.asciidoc:19

[source, python]
----
resp = client.search(
    index="sales",
    size="0",
    aggs={
        "range": {
            "date_range": {
                "field": "date",
                "format": "MM-yyyy",
                "ranges": [
                    {"to": "now-10M/M"},
                    {"from": "now-10M/M"}
                ]
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c47f030216a3c89f92f31787fc4d5df5.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat/plugins.asciidoc:56

[source, python]
----
resp = client.cat.plugins(
    v=True,
    s="component",
    h="name,component,version,description",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c48b8bcd6f41e0d12b58e854e09ea893.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/shape.asciidoc:361

[source, python]
----
resp = client.index(
    index="example",
    document={
        "location": "MULTIPOLYGON (((1002.0 200.0, 1003.0 200.0, 1003.0 300.0, 1002.0 300.0, 102.0 200.0)), ((1000.0 100.0, 1001.0 100.0, 1001.0 100.0, 1000.0 100.0, 1000.0 100.0), (1000.2 100.2, 1000.8 100.2, 1000.8 100.8, 1000.2 100.8, 1000.2 100.2)))"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c4a1d03dcfb82913d0724a42b0a89f20.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/clearcache.asciidoc:158

[source, python]
----
resp = client.indices.clear_cache()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c4b727723b57052b6504bb74fe09abc6.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-index-template.asciidoc:18

[source, python]
----
resp = client.indices.put_index_template(
    name="template_1",
    index_patterns=["template*"],
    priority=1,
    template={"settings": {"number_of_shards": 2}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c4c1a87414741a678f6cb91804daf095.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/rank-feature-query.asciidoc:348

[source, python]
----
resp = client.search(
    index="test",
    query={"rank_feature": {"field": "pagerank", "linear": {}}},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c4fadbb7f61e5f83ab3fc9cd4b82b5e5.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/restore-snapshot.asciidoc:246

[source, python]
----
resp = client.snapshot.restore(
    repository="my_repository",
    snapshot="my_snapshot_2099.05.06",
    feature_states=["geoip"],
    include_global_state=False,
    indices="-*",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/c526fca1609b4c3c1d12dfd218d69a50.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-mapping.asciidoc:383

[source, python]
----
resp = client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c54597143ac86540726f6422fd98b22e.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/update-settings.asciidoc:56

[source, python]
----
resp = client.perform_request(
    "PUT",
    "/_security/settings",
    headers={"Content-Type": "application/json"},
    body={
        "security": {"index.auto_expand_replicas": "0-all"},
        "security-tokens": {"index.auto_expand_replicas": "0-all"},
        "security-profile": {"index.auto_expand_replicas": "0-all"}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c554a1791f29bbbcddda84c64deaba6f.asciidoc
// This file is autogenerated, DO NOT EDIT
// sql/endpoints/rest.asciidoc:229

[source, python]
----
resp = client.sql.query(
    format="txt",
    query="SELECT * FROM library ORDER BY page_count DESC",
    fetch_size=5,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c580092fd3d36c32b09d63921708a67b.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/dis-max-query.asciidoc:18

[source, python]
----
resp = client.search(
    query={
        "dis_max": {
            "queries": [
                {"term": {"title": "Quick pets"}},
                {"term": {"body": "Quick pets"}}
            ],
            "tie_breaker": 0.7
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c5802e9f3f4068fcecb6937b867b270d.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/terms-aggregation.asciidoc:400

[source, python]
----
resp = client.search(
    aggs={
        "genres": {
            "terms": {"field": "genre", "order": {"_count": "asc"}}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c580990a70028bb49cca8a6bde86bbf6.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/bulk-update-api-keys.asciidoc:242

[source, python]
----
resp = client.security.bulk_update_api_keys(
    ids=["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
    role_descriptors={},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c5ba7c4badb5ef5ca32740106e4aa6b6.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/termvectors.asciidoc:42

[source, python]
----
resp = client.termvectors(
    index="my-index-000001",
    id="1",
    fields="message",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c5bc577ff92f889225b0d2617adcb48c.asciidoc
// This file is autogenerated, DO NOT EDIT
// setup/sysconfig/file-descriptors.asciidoc:29

[source, python]
----
resp = client.nodes.stats(
    metric="process",
    filter_path="**.max_file_descriptors",
)
print(resp)
----
python-elasticsearch-9.1.1/docs/examples/c5cc19e48549fbc5327a9d46874bbeee.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/knn-search.asciidoc:321

[source, python]
----
resp = client.search(
    index="quantized-image-index",
    knn={
        "field": "image-vector",
        "query_vector": [0.1, -2],
        "k": 10,
        "num_candidates": 100
    },
    fields=["title"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c5ed7d83ade97a417aef28b9e2871e5d.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/common-log-format-example.asciidoc:189

[source, python]
----
resp = client.search(
    index="my-data-stream",
    filter_path="hits.hits._source",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c6151a0788a10a7f40da684d72c3255c.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/flattened.asciidoc:225

[source, python]
----
resp = client.bulk(
    index="my-index-000001",
    refresh=True,
    operations=[
        {"index": {}},
        {
            "title": "Something really urgent",
            "labels": {
                "priority": "urgent",
                "release": ["v1.2.5", "v1.3.0"],
                "timestamp": {"created": 1541458026, "closed": 1541457010}
            }
        },
        {"index": {}},
        {
            "title": "Somewhat less urgent",
            "labels": {
                "priority": "high",
                "release": ["v1.3.0"],
                "timestamp": {"created": 1541458026, "closed": 1541457010}
            }
        },
        {"index": {}},
        {
            "title": "Not urgent",
            "labels": {
                "priority": "low",
                "release": ["v1.2.0"],
                "timestamp": {"created": 1541458026, "closed": 1541457010}
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c630a1f891aa9aa651f9982b832a42e1.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest.asciidoc:923

[source, python]
----
resp = client.ingest.put_pipeline(
    id="my-pipeline",
    processors=[
        {
            "drop": {
                "description": "Drop documents that contain 'network.name' of 'Guest'",
                "if": "ctx.network?.name != null && ctx.network.name.contains('Guest')"
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c6339d09f85000a6432304b0ec63b8f6.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-component-template.asciidoc:236

[source, python]
----
resp = client.cluster.put_component_template(
    name="template_1",
    template={"settings": {"number_of_shards": 1}},
    version=123,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c639036b87d02fb864e27c4ca29ef833.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/diversified-sampler-aggregation.asciidoc:99

[source, python]
----
resp = client.search(
    index="stackoverflow",
    size="0",
    query={"query_string": {"query": "tags:kibana"}},
    runtime_mappings={
        "tags.hash": {"type": "long", "script": "emit(doc['tags'].hashCode())"}
    },
    aggs={
        "my_unbiased_sample": {
            "diversified_sampler": {
                "shard_size": 200,
                "max_docs_per_value": 3,
                "field": "tags.hash"
            },
            "aggs": {
                "keywords": {
                    "significant_terms": {"field": "tags", "exclude": ["kibana"]}
                }
            }
        }
    },
)
print(resp)
----
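When many documents are sent the way the flattened bulk example above does, the hand-built operations list grows quickly; the client also ships a `helpers.bulk` wrapper that chunks and streams actions. A minimal sketch (the docs list is illustrative):

[source, python]
----
from elasticsearch import helpers

docs = [{"title": "Something really urgent", "labels": {"priority": "urgent"}}]
# helpers.bulk takes an iterable of actions and handles batching for you
helpers.bulk(client, ({"_index": "my-index-000001", "_source": d} for d in docs))
----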
Elasticsearch", "term": { "field": "message" } }, "my-suggest-2": { "text": "kmichy", "term": { "field": "user.id" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c654b09be981be12fc7be0ba33f8652b.asciidoc000066400000000000000000000031721506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:313 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "multilinestring", "coordinates": [ [ [ 1002, 200 ], [ 1003, 200 ], [ 1003, 300 ], [ 1002, 300 ] ], [ [ 1000, 100 ], [ 1001, 100 ], [ 1001, 100 ], [ 1000, 100 ] ], [ [ 1000.2, 100.2 ], [ 1000.8, 100.2 ], [ 1000.8, 100.8 ], [ 1000.2, 100.8 ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c65b00a285f510dcd2865aa3539b4e03.asciidoc000066400000000000000000000002701506101734100263750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/apis/get-transform.asciidoc:106 [source, python] ---- resp = client.transform.get_transform( size="10", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c66dab0b114fa3e228e1c0e0e5a99b60.asciidoc000066400000000000000000000004041506101734100266100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:247 [source, python] ---- resp = client.search( index="my-index-000001", fields=[ "user.first" ], source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c67b0f00c2e690303c0e5af2f51e0fea.asciidoc000066400000000000000000000006721506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters.asciidoc:13 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match": { "message": "tring out Elasticsearch" } }, suggest={ "my-suggestion": { "text": "tring out Elasticsearch", "term": { "field": "message" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c6abe91b5527870face2b826f37ba1da.asciidoc000066400000000000000000000010511506101734100267060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:438 [source, python] ---- resp = client.search( index="image-index", query={ "match": { "title": { "query": "mountain lake", "boost": 0.9 } } }, knn={ "field": "image-vector", "query_vector": [ 54, 10, -2 ], "k": 5, "num_candidates": 50, "boost": 0.1 }, size=10, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c6b365c7da97d7e50f36820a7d36f548.asciidoc000066400000000000000000000004511506101734100264400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/decrease-data-node-disk-usage.asciidoc:127 [source, python] ---- resp = client.indices.put_settings( index="my_index,my_other_index", settings={ "index.number_of_replicas": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c6b5c695a9b757b5e7325345b206bde5.asciidoc000066400000000000000000000002731506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/delete-pipeline.asciidoc:88 [source, python] ---- resp = client.ingest.delete_pipeline( id="pipeline-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc000066400000000000000000000005631506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-set-query.asciidoc:85 [source, python] ---- 
python-elasticsearch-9.1.1/docs/examples/c6b8713bd49661d69d6b868f5b991d17.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/terms-set-query.asciidoc:85

[source, python]
----
resp = client.index(
    index="job-candidates",
    id="1",
    refresh=True,
    document={
        "name": "Jane Smith",
        "programming_languages": ["c++", "java"],
        "required_matches": 2
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c6bdd5c7de79d6d9ac8e33a397b511e8.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/put-mapping.asciidoc:327

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "user_id": {"type": "long"}
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c6d39d22188dc7bbfdad811a94cbcc2b.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/classic-tokenizer.asciidoc:25

[source, python]
----
resp = client.indices.analyze(
    tokenizer="classic",
    text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c6d5e3b6ff9c665ec5344a4bfa7add80.asciidoc
// This file is autogenerated, DO NOT EDIT
// modules/network/tracers.asciidoc:106

[source, python]
----
resp = client.cluster.put_settings(
    persistent={
        "transport.tracer.include": "*",
        "transport.tracer.exclude": "internal:coordination/fault_detection/*"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c733f20641b20e124f26198534755d6d.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations.asciidoc:149

[source, python]
----
resp = client.search(
    index="my-index-000001",
    aggs={
        "my-first-agg-name": {"terms": {"field": "my-field"}},
        "my-second-agg-name": {"avg": {"field": "my-other-field"}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c765ce78f3605c0e70d213f22aac8a53.asciidoc
// This file is autogenerated, DO NOT EDIT
// autoscaling/apis/put-autoscaling-policy.asciidoc:73

[source, python]
----
resp = client.autoscaling.put_autoscaling_policy(
    name="my_autoscaling_policy",
    policy={
        "roles": ["data_hot"],
        "deciders": {"fixed": {}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c76cb6a080959b0d87afd780cf814be2.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/match-bool-prefix-query.asciidoc:28

[source, python]
----
resp = client.search(
    query={
        "bool": {
            "should": [
                {"term": {"message": "quick"}},
                {"term": {"message": "brown"}},
                {"prefix": {"message": "f"}}
            ]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c793efe7280e9b6e09981c4d4f832348.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/types/ip.asciidoc:166

[source, python]
----
resp = client.indices.create(
    index="idx",
    settings={
        "index": {
            "mapping": {"source": {"mode": "synthetic"}}
        }
    },
    mappings={
        "properties": {
            "ip": {"type": "ip"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="idx",
    id="1",
    document={
        "ip": [
            "192.168.0.1",
            "192.168.0.1",
            "10.10.12.123",
            "2001:db8::1:0:0:1",
            "::afff:4567:890a"
        ]
    },
)
print(resp1)
----
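The `job-candidates` document indexed at the start of this group is built for the `terms_set` query, whose query side appears later on the same guide page; a sketch of what that follow-up query looks like (the extra `"php"` term is illustrative):

[source, python]
----
resp = client.search(
    index="job-candidates",
    query={
        "terms_set": {
            "programming_languages": {
                "terms": ["c++", "java", "php"],
                # how many terms must match, read per-document from this field
                "minimum_should_match_field": "required_matches",
            }
        }
    },
)
print(resp)
----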
python-elasticsearch-9.1.1/docs/examples/c79b284fa7a5d7421c6daae62bc697f9.asciidoc
// This file is autogenerated, DO NOT EDIT
// ccr/uni-directional-disaster-recovery.asciidoc:163

[source, python]
----
resp = client.indices.delete(
    index="kibana_sample_data_ecommerce",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c79e8ee86b332302b25c5c1f5f4f89d7.asciidoc
// This file is autogenerated, DO NOT EDIT
// security/authorization/document-level-security.asciidoc:67

[source, python]
----
resp = client.security.put_role(
    name="dept_role",
    indices=[
        {
            "names": ["*"],
            "privileges": ["read"],
            "query": {"term": {"department_id": 12}}
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c8210f23c10d0642f24c1e43faa4deda.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/set-up-a-data-stream.asciidoc:144

[source, python]
----
resp = client.cluster.put_component_template(
    name="my-mappings",
    template={
        "mappings": {
            "properties": {
                "@timestamp": {"type": "date", "format": "date_optional_time||epoch_millis"},
                "message": {"type": "wildcard"}
            }
        }
    },
    meta={
        "description": "Mappings for @timestamp and message fields",
        "my-custom-meta-field": "More arbitrary metadata"
    },
)
print(resp)

resp1 = client.cluster.put_component_template(
    name="my-settings",
    template={
        "settings": {"index.lifecycle.name": "my-lifecycle-policy"}
    },
    meta={
        "description": "Settings for ILM",
        "my-custom-meta-field": "More arbitrary metadata"
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/c87038b96ab06d9a741a130f94de4f02.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/delete.asciidoc:144

[source, python]
----
resp = client.delete(
    index="my-index-000001",
    id="1",
    timeout="5m",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c873f9cd093e26515148f052e28c7805.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/anomaly-detection/apis/get-snapshot.asciidoc:248

[source, python]
----
resp = client.ml.get_model_snapshots(
    job_id="high_sum_total_sales",
    start="1575402236000",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c8aa8e8c0ac160b8c4efd1ac3b9f48f3.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-your-data/ingest-vectors.asciidoc:35

[source, python]
----
resp = client.indices.create(
    index="amazon-reviews",
    mappings={
        "properties": {
            "review_vector": {
                "type": "dense_vector",
                "dims": 8,
                "index": True,
                "similarity": "cosine"
            },
            "review_text": {"type": "text"}
        }
    },
)
print(resp)
----
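With the `amazon-reviews` mapping in place, the natural next steps are indexing a document with an 8-dimension vector and running a kNN search over it; a hedged sketch with illustrative vector values:

[source, python]
----
client.index(
    index="amazon-reviews",
    id="1",
    document={
        "review_text": "Great product",
        "review_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],  # must match dims=8
    },
)
resp = client.search(
    index="amazon-reviews",
    knn={
        "field": "review_vector",
        "query_vector": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
        "k": 2,
        "num_candidates": 10,
    },
)
print(resp)
----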
Brown-Foxes jumped over the lazy dog's bone. — the full analyze request reads:

[source, python]
----
resp = client.indices.analyze(
    tokenizer="classic",
    filter=["classic"],
    text="The 2 Q.U.I.C.K. Brown-Foxes jumped over the lazy dog's bone.",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c8e2109b19d50467ab83a40006462e9f.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/apis/enrich/execute-enrich-policy.asciidoc:45

[source, python]
----
resp = client.enrich.execute_policy(
    name="my-policy",
    wait_for_completion=False,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c92b761c18d8e1c3df75c04a21503e16.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest.asciidoc:360

[source, python]
----
resp = client.cluster.put_component_template(
    name="logs-my_app-settings",
    template={
        "settings": {
            "index.default_pipeline": "logs-my_app-default",
            "index.lifecycle.name": "logs"
        }
    },
)
print(resp)

resp1 = client.indices.put_index_template(
    name="logs-my_app-template",
    index_patterns=["logs-my_app-*"],
    data_stream={},
    priority=500,
    composed_of=["logs-my_app-settings", "logs-my_app-mappings"],
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/c956bf1f0829a5f0357c0494ed8b6ca3.asciidoc
// This file is autogenerated, DO NOT EDIT
// search/search-template-api.asciidoc:43

[source, python]
----
resp = client.search_template(
    index="my-index",
    id="my-search-template",
    params={
        "query_string": "hello world",
        "from": 0,
        "size": 10
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c95d5317525c2ff625e6971c277247af.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/keyword-tokenizer.asciidoc:61

[source, python]
----
resp = client.indices.analyze(
    tokenizer="keyword",
    filter=["lowercase"],
    text="john.SMITH@example.COM",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c96669604d0e66a097ddf3093b025ccd.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations.asciidoc:126

[source, python]
----
resp = client.search(
    index="my-index-000001",
    size=0,
    aggs={
        "my-agg-name": {"terms": {"field": "my-field"}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c96e5740b79f703c5b77e3ddc9fdf3a0.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/set-up-a-data-stream.asciidoc:210

[source, python]
----
resp = client.indices.put_index_template(
    name="my-index-template",
    index_patterns=["my-data-stream*"],
    data_stream={},
    composed_of=["my-mappings", "my-settings"],
    priority=500,
    meta={
        "description": "Template for my time series data",
        "my-custom-meta-field": "More arbitrary metadata"
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c97fd95ebdcf56cc973582e37f732ed2.asciidoc
// This file is autogenerated, DO NOT EDIT
// ingest/apis/enrich/get-enrich-policy.asciidoc:182

[source, python]
----
resp = client.enrich.get_policy()
print(resp)
----
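The `my-search-template` referenced a few examples back must exist as a stored mustache script before `client.search_template` can use it. One hedged way to register such a template; the template body here is illustrative, not the one from the docs:

[source, python]
----
client.put_script(
    id="my-search-template",
    script={
        "lang": "mustache",
        # placeholders are filled from the params passed to search_template
        "source": {
            "query": {"match": {"message": "{{query_string}}"}},
            "from": "{{from}}",
            "size": "{{size}}",
        },
    },
)
----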
python-elasticsearch-9.1.1/docs/examples/c9a6ab0a56bb0177f158277185f68302.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/params/subobjects.asciidoc:20

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    mappings={
        "properties": {
            "metrics": {
                "type": "object",
                "subobjects": False,
                "properties": {
                    "time": {"type": "long"},
                    "time.min": {"type": "long"},
                    "time.max": {"type": "long"}
                }
            }
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="metric_1",
    document={
        "metrics.time": 100,
        "metrics.time.min": 10,
        "metrics.time.max": 900
    },
)
print(resp1)

resp2 = client.index(
    index="my-index-000001",
    id="metric_2",
    document={
        "metrics": {
            "time": 100,
            "time.min": 10,
            "time.max": 900
        }
    },
)
print(resp2)

resp3 = client.indices.get_mapping(
    index="my-index-000001",
)
print(resp3)
----

python-elasticsearch-9.1.1/docs/examples/c9afa715021f2e6450e72ac73271960c.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/parent-aggregation.asciidoc:39

[source, python]
----
resp = client.index(
    index="parent_example",
    id="1",
    document={
        "join": {"name": "question"},
        "body": "I have Windows 2003 server and i bought a new Windows 2008 server...",
        "title": "Whats the best way to file transfer my site from server to a newer one?",
        "tags": ["windows-server-2003", "windows-server-2008", "file-transfer"]
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c9b6cbe93c8bd23e3f658c3af4e70092.asciidoc
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/edgengram-tokenizer.asciidoc:264

[source, python]
----
resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "autocomplete": {"tokenizer": "autocomplete", "filter": ["lowercase"]},
                "autocomplete_search": {"tokenizer": "lowercase"}
            },
            "tokenizer": {
                "autocomplete": {
                    "type": "edge_ngram",
                    "min_gram": 2,
                    "max_gram": 10,
                    "token_chars": ["letter"]
                }
            }
        }
    },
    mappings={
        "properties": {
            "title": {
                "type": "text",
                "analyzer": "autocomplete",
                "search_analyzer": "autocomplete_search"
            }
        }
    },
)
print(resp)

resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={"title": "Quick Foxes"},
)
print(resp1)

resp2 = client.indices.refresh(
    index="my-index-000001",
)
print(resp2)

resp3 = client.search(
    index="my-index-000001",
    query={
        "match": {
            "title": {"query": "Quick Fo", "operator": "and"}
        }
    },
)
print(resp3)
----

python-elasticsearch-9.1.1/docs/examples/c9c396b94bb88098477e2b08b55a12ee.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/bulk.asciidoc:774

[source, python]
----
resp = client.indices.create(
    index="my-index",
    mappings={
        "dynamic_templates": [
            {
                "geo_point": {
                    "mapping": {"type": "geo_point"}
                }
            }
        ]
    },
)
print(resp)

resp1 = client.bulk(
    operations=[
        {
            "index": {
                "_index": "my-index",
                "_id": "1",
                "dynamic_templates": {"work_location": "geo_point"}
            }
        },
        {
            "field": "value1",
            "work_location": "41.12,-71.34",
            "raw_location": "41.12,-71.34"
        },
        {
            "create": {
                "_index": "my-index",
                "_id": "2",
                "dynamic_templates": {"home_location": "geo_point"}
            }
        },
        {
            "field": "value2",
            "home_location": "41.12,-71.34"
        }
    ],
)
print(resp1)
----
python-elasticsearch-9.1.1/docs/examples/c9ce07a7d3d8a317f08535bdd3aa69a3.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/update.asciidoc:224

[source, python]
----
resp = client.update(
    index="test",
    id="1",
    script={
        "source": "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'noop' }",
        "lang": "painless",
        "params": {"tag": "green"}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/c9d9a1d751f20f6197c825cb4378fe9f.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/terms-query.asciidoc:21

[source, python]
----
resp = client.search(
    query={
        "terms": {
            "user.id": ["kimchy", "elkbee"],
            "boost": 1
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/ca06db2aa4747910278f96315f7be94b.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/geo-bounding-box-query.asciidoc:356

[source, python]
----
resp = client.search(
    index="my_locations",
    query={
        "bool": {
            "must": {"match_all": {}},
            "filter": {
                "geo_bounding_box": {
                    "pin.location": {
                        "top": 40.73,
                        "left": -74.1,
                        "bottom": 40.01,
                        "right": -71.12
                    }
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/ca08e511e5907d258081b10a1a9f0072.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/change-mappings-and-settings.asciidoc:454

[source, python]
----
resp = client.indices.put_index_template(
    name="new-data-stream-template",
    index_patterns=["new-data-stream*"],
    data_stream={},
    priority=500,
    template={
        "mappings": {
            "properties": {
                "@timestamp": {"type": "date_nanos"}
            }
        },
        "settings": {
            "sort.field": ["@timestamp"],
            "sort.order": ["desc"]
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/ca1cc4bcef22fdf9153833bfe6a55294.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/use-a-data-stream.asciidoc:367

[source, python]
----
resp = client.bulk(
    refresh=True,
    operations=[
        {
            "index": {
                "_index": ".ds-my-data-stream-2099.03.08-000003",
                "_id": "bfspvnIBr7VVZlfp2lqX",
                "if_seq_no": 0,
                "if_primary_term": 1
            }
        },
        {
            "@timestamp": "2099-03-08T11:06:07.000Z",
            "user": {"id": "8a4f500d"},
            "message": "Login successful"
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/ca3bcd6278510ebced5f74484033cb36.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/apis/get-script-languages-api.asciidoc:17

[source, python]
----
resp = client.get_script_languages()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/ca5ae0eb7709f3807bc6239cd4bd9141.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/get-api-keys.asciidoc:246

[source, python]
----
resp = client.security.get_api_key()
print(resp)
----
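The `if_seq_no`/`if_primary_term` values in the bulk example above implement optimistic concurrency control; in practice they are read from the current copy of the document rather than hard-coded. A hedged sketch of fetching them first:

[source, python]
----
doc = client.get(index=".ds-my-data-stream-2099.03.08-000003", id="bfspvnIBr7VVZlfp2lqX")
seq_no, primary_term = doc["_seq_no"], doc["_primary_term"]
# pass seq_no/primary_term as if_seq_no/if_primary_term in the next write;
# the write fails with a version conflict if the document changed in between
----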
python-elasticsearch-9.1.1/docs/examples/ca5dda98e977125d40a7fe1e178e213f.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/sparse-vector-query.asciidoc:134

[source, python]
----
resp = client.search(
    index="my-index",
    query={
        "sparse_vector": {
            "field": "ml.tokens",
            "inference_id": "my-elser-model",
            "query": "How is the weather in Jamaica?"
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/ca98afbd6a90f63e02f62239d225313b.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/dangling-index-import.asciidoc:65

[source, python]
----
resp = client.dangling_indices.import_dangling_index(
    index_uuid="zmM4e0JtBkeUjiHD-MihPQ",
    accept_data_loss=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/caaafef1a76c2bec677704c2dc233218.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/simulate-index.asciidoc:39

[source, python]
----
resp = client.indices.simulate_index_template(
    name="my-index-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/caab99520d3fe41f6154d74a7f696057.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/delete-index.asciidoc:16

[source, python]
----
resp = client.indices.delete(
    index="my-index-000001",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cac74a85c6b352a6e23d8673abae126f.asciidoc
// This file is autogenerated, DO NOT EDIT
// aggregations/bucket/frequent-item-sets-aggregation.asciidoc:257

[source, python]
----
resp = client.async_search.submit(
    index="kibana_sample_data_ecommerce",
    size=0,
    aggs={
        "my_agg": {
            "frequent_item_sets": {
                "minimum_set_size": 3,
                "fields": [
                    {"field": "category.keyword"},
                    {"field": "geoip.city_name"}
                ],
                "size": 3,
                "filter": {
                    "term": {"geoip.continent_name": "Europe"}
                }
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cafed0e2c2b1d1574eb4a5ecd514a97a.asciidoc
// This file is autogenerated, DO NOT EDIT
// indices/split-index.asciidoc:16

[source, python]
----
resp = client.indices.split(
    index="my-index-000001",
    target="split-my-index-000001",
    settings={"index.number_of_shards": 2},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cb0c3223fd45148497df73adfba2e9ce.asciidoc
// This file is autogenerated, DO NOT EDIT
// docs/reindex.asciidoc:674

[source, python]
----
resp = client.reindex(
    source={
        "index": "my-index-000001",
        "query": {"term": {"user.id": "kimchy"}}
    },
    dest={"index": "my-new-index-000001"},
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cb16f1ff85399ddaa418834be580c9de.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/take-snapshot.asciidoc:136

[source, python]
----
resp = client.security.put_role(
    name="slm-admin",
    cluster=["manage_slm", "cluster:admin/snapshot/*"],
    indices=[
        {
            "names": [".slm-history-*"],
            "privileges": ["all"]
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cb1d2a787bbe88974cfc5f132556a51c.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/restore-snapshot.asciidoc:421

[source, python]
----
resp = client.indices.delete_data_stream(
    name="*",
    expand_wildcards="all",
)
print(resp)
----
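Large reindexes like the filtered one above are often kicked off asynchronously; with `wait_for_completion=False` the reindex API returns a task ID that can be polled through the tasks API. A hedged sketch:

[source, python]
----
resp = client.reindex(
    source={"index": "my-index-000001"},
    dest={"index": "my-new-index-000001"},
    wait_for_completion=False,
)
# poll the returned task until it reports completion
status = client.tasks.get(task_id=resp["task"])
print(status["completed"])
----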
python-elasticsearch-9.1.1/docs/examples/cb2f70601cb004b9ece9b0b43a9dc21a.asciidoc
// This file is autogenerated, DO NOT EDIT
// shard-request-cache.asciidoc:49

[source, python]
----
resp = client.indices.clear_cache(
    index="my-index-000001,my-index-000002",
    request=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cb3c483816b6ea150ff6c559fa144d32.asciidoc
// This file is autogenerated, DO NOT EDIT
// ilm/ilm-tutorial.asciidoc:75

[source, python]
----
resp = client.ilm.put_lifecycle(
    name="timeseries_policy",
    policy={
        "phases": {
            "hot": {
                "actions": {
                    "rollover": {
                        "max_primary_shard_size": "50GB",
                        "max_age": "30d"
                    }
                }
            },
            "delete": {
                "min_age": "90d",
                "actions": {"delete": {}}
            }
        }
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cb4388b72d41c431ec9ca8255b2f65fb.asciidoc
// This file is autogenerated, DO NOT EDIT
// query-dsl/shape-query.asciidoc:27

[source, python]
----
resp = client.indices.create(
    index="example",
    mappings={
        "properties": {
            "geometry": {"type": "shape"}
        }
    },
)
print(resp)

resp1 = client.index(
    index="example",
    id="1",
    refresh="wait_for",
    document={
        "name": "Lucky Landing",
        "geometry": {
            "type": "point",
            "coordinates": [1355.400544, 5255.530286]
        }
    },
)
print(resp1)
----

python-elasticsearch-9.1.1/docs/examples/cb71332115c92cfb89375abd30b8bbbb.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat.asciidoc:42

[source, python]
----
resp = client.cat.master(
    v=True,
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cb71c6ecfb8b19725c374572444e5d32.asciidoc
// This file is autogenerated, DO NOT EDIT
// mapping/runtime.asciidoc:366

[source, python]
----
resp = client.search(
    index="my-index-000001",
    aggs={
        "avg_start": {"avg": {"field": "measures.start"}},
        "avg_end": {"avg": {"field": "measures.end"}}
    },
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cba3462a307e2483c14e3e198f6960e3.asciidoc
// This file is autogenerated, DO NOT EDIT
// ilm/apis/put-lifecycle.asciidoc:66

[source, python]
----
resp = client.ilm.put_lifecycle(
    name="my_policy",
    policy={
        "_meta": {
            "description": "used for nginx log",
            "project": {
                "name": "myProject",
                "department": "myDepartment"
            }
        },
        "phases": {
            "warm": {
                "min_age": "10d",
                "actions": {
                    "forcemerge": {"max_num_segments": 1}
                }
            },
            "delete": {
                "min_age": "30d",
                "actions": {"delete": {}}
            }
        }
    },
)
print(resp)
----
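Once a policy such as `my_policy` or `timeseries_policy` is attached to indices, progress through its phases can be inspected per index; a small sketch using the ILM explain API (the index name is illustrative):

[source, python]
----
resp = client.ilm.explain_lifecycle(index="my-index-000001")
print(resp)
----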
python-elasticsearch-9.1.1/docs/examples/cbc2b5595890f87165aab1a741b1d22c.asciidoc
// This file is autogenerated, DO NOT EDIT
// data-streams/downsampling-manual.asciidoc:224

[source, python]
----
resp = client.ingest.put_pipeline(
    id="my-timestamp-pipeline",
    description="Shifts the @timestamp to the last 15 minutes",
    processors=[
        {
            "set": {
                "field": "ingest_time",
                "value": "{{_ingest.timestamp}}"
            }
        },
        {
            "script": {
                "lang": "painless",
                "source": "\n def delta = ChronoUnit.SECONDS.between(\n ZonedDateTime.parse(\"2022-06-21T15:49:00Z\"),\n ZonedDateTime.parse(ctx[\"ingest_time\"])\n );\n ctx[\"@timestamp\"] = ZonedDateTime.parse(ctx[\"@timestamp\"]).plus(delta,ChronoUnit.SECONDS).toString();\n "
            }
        }
    ],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cbfd6f23f8283e64ec3157c65bb722c4.asciidoc
// This file is autogenerated, DO NOT EDIT
// cat.asciidoc:218

[source, python]
----
resp = client.cat.templates(
    v=True,
    s="order:desc,index_patterns",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cc0cca5556ec6224c7134c233734beed.asciidoc
// This file is autogenerated, DO NOT EDIT
// ccr/getting-started.asciidoc:132

[source, python]
----
resp = client.cluster.remote_info()
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cc56be758d5d75febbd975786187c861.asciidoc
// This file is autogenerated, DO NOT EDIT
// rest-api/security/create-service-token.asciidoc:103

[source, python]
----
resp = client.security.create_service_token(
    namespace="elastic",
    service="fleet-server",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cc5eefcc2102aae7e87b0c87b4af10b8.asciidoc
// This file is autogenerated, DO NOT EDIT
// esql/multivalued-fields.asciidoc:54

[source, python]
----
resp = client.indices.create(
    index="mv",
    mappings={
        "properties": {
            "b": {"type": "keyword"}
        }
    },
)
print(resp)

resp1 = client.bulk(
    index="mv",
    refresh=True,
    operations=[
        {"index": {}},
        {"a": 1, "b": ["foo", "foo", "bar"]},
        {"index": {}},
        {"a": 2, "b": ["bar", "bar"]}
    ],
)
print(resp1)

resp2 = client.esql.query(
    query="FROM mv | LIMIT 2",
)
print(resp2)
----

python-elasticsearch-9.1.1/docs/examples/cc7f1c74ede6810e2c9db19256d6b653.asciidoc
// This file is autogenerated, DO NOT EDIT
// scripting/common-script-uses.asciidoc:193

[source, python]
----
resp = client.search(
    index="my-index",
    query={"match": {"http.response": "304"}},
    fields=["http.response"],
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cc90639f2e65bd89cb73296cac6135cf.asciidoc
// This file is autogenerated, DO NOT EDIT
// ml/trained-models/apis/delete-trained-models.asciidoc:60

[source, python]
----
resp = client.ml.delete_trained_model(
    model_id="regression-job-one-1574775307356",
)
print(resp)
----

python-elasticsearch-9.1.1/docs/examples/cc9dac8db7a1482e2fbe3235197c3de1.asciidoc
// This file is autogenerated, DO NOT EDIT
// snapshot-restore/apis/restore-snapshot-api.asciidoc:248

[source, python]
----
resp = client.snapshot.restore(
    repository="my_repository",
    snapshot="snapshot_2",
    wait_for_completion=True,
    indices="index_1,index_2",
    ignore_unavailable=True,
    include_global_state=False,
    rename_pattern="index_(.+)",
    rename_replacement="restored_index_$1",
    include_aliases=False,
)
print(resp)
----
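ES|QL responses come back as a list of `columns` plus row-oriented `values`, so the `FROM mv | LIMIT 2` query above can be unpacked without extra helpers; a sketch:

[source, python]
----
resp = client.esql.query(query="FROM mv | LIMIT 2")
# pair each row's values with the column names from the response
names = [c["name"] for c in resp["columns"]]
for row in resp["values"]:
    print(dict(zip(names, row)))
----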
}, { "_index": "my-index", "_id": "id", "_source": { "foo": "rab" } } ], pipeline_substitutions={ "my-pipeline": { "processors": [ { "set": { "field": "field3", "value": "value3" } } ] } }, component_template_substitutions={ "my-component-template": { "template": { "mappings": { "dynamic": "true", "properties": { "field3": { "type": "keyword" } } }, "settings": { "index": { "default_pipeline": "my-pipeline" } } } } }, index_template_substitutions={ "my-index-template": { "index_patterns": [ "my-index-*" ], "composed_of": [ "component_template_1", "component_template_2" ] } }, mapping_addition={ "dynamic": "strict", "properties": { "foo": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ccec66fb20d5ede6c691e0890cfe402a.asciidoc000066400000000000000000000003421506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-job.asciidoc:91 [source, python] ---- resp = client.ml.delete_job( job_id="total-requests", wait_for_completion=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ccf84c1e5e5602a9e841cb8f7e3bb29f.asciidoc000066400000000000000000000007431506101734100267350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/standard-analyzer.asciidoc:284 [source, python] ---- resp = client.indices.create( index="standard_example", settings={ "analysis": { "analyzer": { "rebuilt_standard": { "tokenizer": "standard", "filter": [ "lowercase" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd16538654e0f834ff19fe6cf329c398.asciidoc000066400000000000000000000007601506101734100264550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:65 [source, python] ---- resp = client.indices.create( index="hugging-face-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 768, "element_type": "float" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd373a6eb1ef4748616500b26fab3006.asciidoc000066400000000000000000000007221506101734100264020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/async-search.asciidoc:21 [source, python] ---- resp = client.async_search.submit( index="sales*", size="0", sort=[ { "date": { "order": "asc" } } ], aggs={ "sale_date": { "date_histogram": { "field": "date", "calendar_interval": "1d" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd38c601ab293a6ec0e2df71d0c96b58.asciidoc000066400000000000000000000013061506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template.asciidoc:353 [source, python] ---- resp = client.cluster.put_component_template( name="template_with_2_shards", template={ "settings": { "index.number_of_shards": 2 } }, ) print(resp) resp1 = client.cluster.put_component_template( name="template_with_3_shards", template={ "settings": { "index.number_of_shards": 3 } }, ) print(resp1) resp2 = client.indices.put_index_template( name="template_1", index_patterns=[ "t*" ], composed_of=[ "template_with_2_shards", "template_with_3_shards" ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/cd67ad2c09fafef2d441c3502d0bb3d7.asciidoc000066400000000000000000000003541506101734100267610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/apis/put-lifecycle.asciidoc:84 [source, 
python] ---- resp = client.indices.put_data_lifecycle( name="my-data-stream", data_retention="7d", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd6eee201a233b989ac1f2794fa6d640.asciidoc000066400000000000000000000010011506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1107 [source, python] ---- resp = client.eql.search( index="my-data-stream", filter_path="-hits.events._source", runtime_mappings={ "day_of_week": { "type": "keyword", "script": "emit(doc['@timestamp'].value.dayOfWeekEnum.toString())" } }, query="\n process where process.name == \"regsvr32.exe\"\n ", fields=[ "@timestamp", "day_of_week" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd6fa7f63c93bb04824acd3a7d1f8de3.asciidoc000066400000000000000000000015631506101734100270040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-not-query.asciidoc:13 [source, python] ---- resp = client.search( query={ "span_not": { "include": { "span_term": { "field1": "hoya" } }, "exclude": { "span_near": { "clauses": [ { "span_term": { "field1": "la" } }, { "span_term": { "field1": "hoya" } } ], "slop": 0, "in_order": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd7da0c3769682f546cc1888e569382e.asciidoc000066400000000000000000000010071506101734100263720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:776 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match_phrase": { "message": "number 1" } }, highlight={ "fields": { "message": { "type": "plain", "fragment_size": 15, "number_of_fragments": 3, "fragmenter": "span" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd8006165ac64f1ef99af48e5a35a25b.asciidoc000066400000000000000000000003321506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-app-privileges.asciidoc:64 [source, python] ---- resp = client.security.get_privileges( application="myapp", name="read", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cd93919e13f656ad2e6629f45c579b93.asciidoc000066400000000000000000000002601506101734100263720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shard-stores.asciidoc:120 [source, python] ---- resp = client.indices.shard_stores( index="test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cda045dfd79acd160ed8668f2ee17ea7.asciidoc000066400000000000000000000004341506101734100270110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/term-query.asciidoc:170 [source, python] ---- resp = client.search( index="my-index-000001", pretty=True, query={ "match": { "full_text": "Quick Brown Foxes!" 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdb68b3f565df7c85e52a55864b37d40.asciidoc000066400000000000000000000004711506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:364 [source, python] ---- resp = client.indices.create( index="my-new-index-000001", mappings={ "properties": { "user_id": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdb7613b445e6ed6e8b473f9cae1af90.asciidoc000066400000000000000000000017071506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/intervals-query.asciidoc:497 [source, python] ---- resp = client.search( query={ "intervals": { "my_text": { "all_of": { "ordered": True, "max_gaps": 1, "intervals": [ { "match": { "query": "my favorite food", "max_gaps": 0, "ordered": True } }, { "match": { "query": "cold porridge", "max_gaps": 4, "ordered": True } } ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdc04e6d3d37f036c7045ee4a582ef06.asciidoc000066400000000000000000000013671506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:610 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "strings_as_keywords": { "match_mapping_type": "string", "mapping": { "type": "text", "norms": False, "fields": { "keyword": { "type": "keyword", "ignore_above": 256 } } } } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdc38c98320a0df705ec8d173c725375.asciidoc000066400000000000000000000005321506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:287 [source, python] ---- resp = client.search( index="my_locations", size=0, aggs={ "grouped": { "geohex_grid": { "field": "location", "precision": 1 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdce7bc083dfb36e6f1d465a5c9d5049.asciidoc000066400000000000000000000003271506101734100267300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connector-sync-jobs-api.asciidoc:56 [source, python] ---- resp = client.perform_request( "GET", "/_connector/_sync_job", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdd29b01e730b3996de68a2788050021.asciidoc000066400000000000000000000003061506101734100262530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/enrich/delete-enrich-policy.asciidoc:42 [source, python] ---- resp = client.enrich.delete_policy( name="my-policy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdd7127681254f4d614cc075f9e6fbcf.asciidoc000066400000000000000000000004251506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:427 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", query={ "term": { "user.id": "kimchy" } }, max_docs=1, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cde19d110a58317610033ea3dcb0eb80.asciidoc000066400000000000000000000014731506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:737 [source, python] ---- resp = client.render_search_template( source="\n {\n \"query\": {\n \"match\": {\n {{#query_message}}\n {{#query_string}}\n \"message\": \"Hello 
{{#first_name_section}}{{first_name}}{{/first_name_section}} {{#last_name_section}}{{last_name}}{{/last_name_section}}\"\n {{/query_string}}\n {{/query_message}}\n }\n }\n }\n ", params={ "query_message": { "query_string": { "first_name_section": { "first_name": "John" }, "last_name_section": { "last_name": "kimchy" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cde4104a29dfe942d55863cdd8718627.asciidoc000066400000000000000000000002541506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/start-slm.asciidoc:76 [source, python] ---- resp = client.slm.get_status() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdf400299acd1c7b1b7bb42e284e3d08.asciidoc000066400000000000000000000005031506101734100266210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:141 [source, python] ---- resp = client.update( index="test", id="1", script={ "source": "ctx._source.tags.add(params.tag)", "lang": "painless", "params": { "tag": "blue" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cdfd4fef983c1c0fe8d7417f67d01eae.asciidoc000066400000000000000000000003751506101734100271010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/red-yellow-cluster-status.asciidoc:157 [source, python] ---- resp = client.indices.put_settings( settings={ "index.number_of_replicas": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce0a1aba713b0448b0c6a504af7b3a08.asciidoc000066400000000000000000000002401506101734100265660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:339 [source, python] ---- resp = client.slm.get_stats() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce0c3d7330727f7673cf68fc9a1cfb86.asciidoc000066400000000000000000000002671506101734100266030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/clearcache.asciidoc:17 [source, python] ---- resp = client.indices.clear_cache( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce247fc08371e1b30cb52195e521c076.asciidoc000066400000000000000000000013241506101734100263220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:219 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_bounding_box": { "pin.location": { "top_left": [ -74.1, 40.73 ], "bottom_right": [ -71.12, 40.01 ] } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce2c2e8f5a2e4daf051b6e10122e5aae.asciidoc000066400000000000000000000006141506101734100267470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:519 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "text_embedding": { "type": "dense_vector", "dims": 384, "index_options": { "type": "int4_hnsw" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce3c391c2b1915cfc44a2917bca71d19.asciidoc000066400000000000000000000010351506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/put-dfanalytics.asciidoc:650 [source, python] ---- resp = client.ml.put_data_frame_analytics( id="loganalytics", description="Outlier detection on log data", source={ "index": "logdata" 
}, dest={ "index": "logdata_out" }, analysis={ "outlier_detection": { "compute_feature_influence": True, "outlier_fraction": 0.05, "standardization_enabled": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce725697f93b3eebb3a266314568565a.asciidoc000066400000000000000000000010731506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/fingerprint-analyzer.asciidoc:159 [source, python] ---- resp = client.indices.create( index="fingerprint_example", settings={ "analysis": { "analyzer": { "rebuilt_fingerprint": { "tokenizer": "standard", "filter": [ "lowercase", "asciifolding", "fingerprint" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce8471d31e5d60309e142feb040fd2f8.asciidoc000066400000000000000000000002471506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/query-watches.asciidoc:73 [source, python] ---- resp = client.watcher.query_watches() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce899fcf55da72fc32e623d1ad88b301.asciidoc000066400000000000000000000006151506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/ignore-missing-component-templates.asciidoc:72 [source, python] ---- resp = client.cluster.put_component_template( name="logs-foo_component2", template={ "mappings": { "properties": { "host.ip": { "type": "ip" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ce8eebfb810335803630abe83278bee7.asciidoc000066400000000000000000000002761506101734100265660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:253 [source, python] ---- resp = client.security.get_api_key( active_only=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cecfaa659af6646b3b67d7b311586fa0.asciidoc000066400000000000000000000021651506101734100266460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/attachment.asciidoc:396 [source, python] ---- resp = client.ingest.put_pipeline( id="attachment", description="Extract attachment information from arrays", processors=[ { "foreach": { "field": "attachments", "processor": { "attachment": { "target_field": "_ingest._value.attachment", "field": "_ingest._value.data", "remove_binary": True } } } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="attachment", document={ "attachments": [ { "filename": "ipsum.txt", "data": "dGhpcyBpcwpqdXN0IHNvbWUgdGV4dAo=" }, { "filename": "test.txt", "data": "VGhpcyBpcyBhIHRlc3QK" } ] }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/cedb56a71cc743d80263ce352bb21720.asciidoc000066400000000000000000000006001506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-elser.asciidoc:157 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="my-elser-model", inference_config={ "service": "elser", "service_settings": { "num_allocations": 1, "num_threads": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cee491dd0a8d10ed0cb11a2faa0c99f0.asciidoc000066400000000000000000000010201506101734100270160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:1185 [source, python] ---- resp = 
client.ml.infer_trained_model( model_id="model2", docs=[ { "text_field": "The Amazon rainforest covers most of the Amazon basin in South America" } ], inference_config={ "ner": { "tokenization": { "bert": { "truncate": "first" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cee591c1fc70d4f180c623a3a6d07755.asciidoc000066400000000000000000000003171506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:78 [source, python] ---- resp = client.security.get_token( grant_type="client_credentials", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cf23f18761df33f08bc6f6d1875496fd.asciidoc000066400000000000000000000004531506101734100265260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:399 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "routing.allocation.total_shards_per_node": 5 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cf47cd4a39cd62a3ecad919e54a67bca.asciidoc000066400000000000000000000003531506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/ignored-field.asciidoc:36 [source, python] ---- resp = client.search( query={ "term": { "_ignored": "@timestamp" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cf5dab4334783ca9b8942eab68fb7174.asciidoc000066400000000000000000000021711506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/nested-aggregation.asciidoc:114 [source, python] ---- resp = client.search( index="products", size="0", query={ "match": { "name": "led tv" } }, aggs={ "resellers": { "nested": { "path": "resellers" }, "aggs": { "filter_reseller": { "filter": { "bool": { "filter": [ { "term": { "resellers.reseller": "companyB" } } ] } }, "aggs": { "min_price": { "min": { "field": "resellers.price" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cf75a880c749a2f2010a8ec3f348e5c3.asciidoc000066400000000000000000000004501506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1391 [source, python] ---- resp = client.eql.search( index="my-data-stream", keep_on_completion=True, wait_for_completion_timeout="2s", query="\n process where process.name == \"cmd.exe\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cf8ca470156698dbf47fdc822d0a714f.asciidoc000066400000000000000000000003201506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/get-desired-nodes.asciidoc:70 [source, python] ---- resp = client.perform_request( "GET", "/_internal/desired_nodes/_latest", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cf9f51d719a2e90ffe36ed6fe56a4a69.asciidoc000066400000000000000000000010271506101734100267460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:83 [source, python] ---- resp = client.security.put_role( name="remote-replication", cluster=[ "manage_ccr" ], indices=[ { "names": [ "follower-index-name" ], "privileges": [ "monitor", "read", "write", "manage_follow_index" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cfad3631be0634ee49c424f9ccec62d9.asciidoc000066400000000000000000000003061506101734100267220ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // rest-api/security/invalidate-api-keys.asciidoc:174 [source, python] ---- resp = client.security.invalidate_api_key( owner=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cfd4b34f35e531a20739a3b308d57134.asciidoc000066400000000000000000000006371506101734100263310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:199 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_docs": 100000000 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/cffce059425d3d21e7f9571500d63524.asciidoc000066400000000000000000000003011506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/delete-roles.asciidoc:46 [source, python] ---- resp = client.security.delete_role( name="my_admin_role", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d003ee256d24aa6000bd9dbf1d608dc5.asciidoc000066400000000000000000000010361506101734100266100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:78 [source, python] ---- resp = client.ingest.put_pipeline( id="elser-v2-test", processors=[ { "inference": { "model_id": ".elser_model_2", "input_output": [ { "input_field": "content", "output_field": "content_embedding" } ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d003f9110e5a474230abe11f36da9297.asciidoc000066400000000000000000000011331506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/redact.asciidoc:50 [source, python] ---- resp = client.ingest.simulate( pipeline={ "description": "Hide my IP", "processors": [ { "redact": { "field": "message", "patterns": [ "%{IP:client}" ] } } ] }, docs=[ { "_source": { "message": "55.3.244.1 GET /index.html 15824 0.043" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d01d309b0257d6fbca6d0941adeb3256.asciidoc000066400000000000000000000016201506101734100265340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/simulate-index.asciidoc:151 [source, python] ---- resp = client.cluster.put_component_template( name="ct1", template={ "settings": { "index.number_of_shards": 2 } }, ) print(resp) resp1 = client.cluster.put_component_template( name="ct2", template={ "settings": { "index.number_of_replicas": 0 }, "mappings": { "properties": { "@timestamp": { "type": "date" } } } }, ) print(resp1) resp2 = client.indices.put_index_template( name="final-template", index_patterns=[ "my-index-*" ], composed_of=[ "ct1", "ct2" ], priority=5, ) print(resp2) resp3 = client.indices.simulate_index_template( name="my-index-000001", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/d03139a851888db53f8b7affd85eb495.asciidoc000066400000000000000000000003151506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/check-in-connector-api.asciidoc:75 [source, python] ---- resp = client.connector.check_in( connector_id="my-connector", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d0378fe5e3aad05a2fd2e6e81213374f.asciidoc000066400000000000000000000021531506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:331 [source, python] ---- resp = client.indices.create( index="bulgarian_example", settings={ 
"analysis": { "filter": { "bulgarian_stop": { "type": "stop", "stopwords": "_bulgarian_" }, "bulgarian_keywords": { "type": "keyword_marker", "keywords": [ "пример" ] }, "bulgarian_stemmer": { "type": "stemmer", "language": "bulgarian" } }, "analyzer": { "rebuilt_bulgarian": { "tokenizer": "standard", "filter": [ "lowercase", "bulgarian_stop", "bulgarian_keywords", "bulgarian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d03b0e2f0f3f5ac8d53287c445007a89.asciidoc000066400000000000000000000006671506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/similarity.asciidoc:32 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "default_field": { "type": "text" }, "boolean_sim_field": { "type": "text", "similarity": "boolean" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d04f0c8c44e8b4fb55f2e7d9d05977e7.asciidoc000066400000000000000000000027131506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:155 [source, python] ---- resp = client.bulk( operations=[ { "index": { "_index": "books" } }, { "name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585 }, { "index": { "_index": "books" } }, { "name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328 }, { "index": { "_index": "books" } }, { "name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227 }, { "index": { "_index": "books" } }, { "name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268 }, { "index": { "_index": "books" } }, { "name": "The Handmaids Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d050c6fa7d806457a5f32d30b07e9521.asciidoc000066400000000000000000000011211506101734100263220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:504 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "dot_expander": { "description": "Expand 'my-object-field.my-property'", "field": "my-object-field.my-property" } }, { "set": { "description": "Set 'my-object-field.my-property' to 10", "field": "my-object-field.my-property", "value": 10 } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d0546f047359b85a7e98207dc8de896a.asciidoc000066400000000000000000000013421506101734100263710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/coerce.asciidoc:60 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.mapping.coerce": False }, mappings={ "properties": { "number_one": { "type": "integer", "coerce": True }, "number_two": { "type": "integer" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "number_one": "10" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "number_two": "10" }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/d05b2a37106fce0ebbd41e2fd6bd26c2.asciidoc000066400000000000000000000027341506101734100267550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/min-aggregation.asciidoc:126 [source, python] ---- resp = client.indices.create( index="metrics_index", 
mappings={ "properties": { "latency_histo": { "type": "histogram" } } }, ) print(resp) resp1 = client.index( index="metrics_index", id="1", refresh=True, document={ "network.name": "net-1", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp1) resp2 = client.index( index="metrics_index", id="2", refresh=True, document={ "network.name": "net-2", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 8, 17, 8, 7, 6 ] } }, ) print(resp2) resp3 = client.search( index="metrics_index", size="0", filter_path="aggregations", aggs={ "min_latency": { "min": { "field": "latency_histo" } } }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/d06a649bc38aa9a6433b64efa78d8cb5.asciidoc000066400000000000000000000033671506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/common-script-uses.asciidoc:52 [source, python] ---- resp = client.bulk( index="my-index", refresh=True, operations=[ { "index": {} }, { "timestamp": "2020-04-30T14:30:17-05:00", "message": "40.135.0.0 - - [30/Apr/2020:14:30:17 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:30:53-05:00", "message": "232.0.0.0 - - [30/Apr/2020:14:30:53 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:12-05:00", "message": "26.1.0.0 - - [30/Apr/2020:14:31:12 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:19-05:00", "message": "247.37.0.0 - - [30/Apr/2020:14:31:19 -0500] \"GET /french/splash_inet.html HTTP/1.0\" 200 3781" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:22-05:00", "message": "247.37.0.0 - - [30/Apr/2020:14:31:22 -0500] \"GET /images/hm_nbg.jpg HTTP/1.0\" 304 0" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:27-05:00", "message": "252.0.0.0 - - [30/Apr/2020:14:31:27 -0500] \"GET /images/hm_bg.jpg HTTP/1.0\" 200 24736" }, { "index": {} }, { "timestamp": "2020-04-30T14:31:28-05:00", "message": "not a valid apache log" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d095b422d9803c02b62c01adffc85376.asciidoc000066400000000000000000000002501506101734100264030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/get-job.asciidoc:94 [source, python] ---- resp = client.rollup.get_jobs( id="sensor", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d0dee031197214b59ff9ac7540527d2c.asciidoc000066400000000000000000000014341506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:43 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movfn": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "MovingFunctions.unweightedAvg(values)" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d0fad375f6e074e9067ed93d3faa07bd.asciidoc000066400000000000000000000040641506101734100267300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/cartesian-bounds-aggregation.asciidoc:12 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { 
"index": { "_id": 1 } }, { "location": "POINT (491.2350 5237.4081)", "city": "Amsterdam", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (490.1618 5236.9219)", "city": "Amsterdam", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 } }, { "location": "POINT (491.4722 5237.1667)", "city": "Amsterdam", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (440.5200 5122.2900)", "city": "Antwerp", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (233.6389 4886.1111)", "city": "Paris", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (232.7000 4886.0000)", "city": "Paris", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( index="museums", size="0", query={ "match": { "name": "musée" } }, aggs={ "viewport": { "cartesian_bounds": { "field": "location" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/d0fde00ef381e61b8a9e99f18cb5970a.asciidoc000066400000000000000000000004531506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/simple-query-string-query.asciidoc:181 [source, python] ---- resp = client.search( query={ "simple_query_string": { "query": "foo | bar + baz*", "flags": "OR|AND|PREFIX" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d11ea753a5d86f7e630fd69a069948b1.asciidoc000066400000000000000000000003621506101734100264360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:168 [source, python] ---- resp = client.sql.query( format="json", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1299b9ae1e621d2fdd0b8644c142ace.asciidoc000066400000000000000000000021261506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/categorize-text-aggregation.asciidoc:334 [source, python] ---- resp = client.search( index="log-messages", filter_path="aggregations", aggs={ "daily": { "date_histogram": { "field": "time", "fixed_interval": "1d" }, "aggs": { "categories": { "categorize_text": { "field": "message", "categorization_filters": [ "\\w+\\_\\d{3}" ] }, "aggs": { "hit": { "top_hits": { "size": 1, "sort": [ "time" ], "_source": "message" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d12df43ffcdcd937bae9b26fb475e239.asciidoc000066400000000000000000000004101506101734100270040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/uaxurlemail-tokenizer.asciidoc:14 [source, python] ---- resp = client.indices.analyze( tokenizer="uax_url_email", text="Email me at john.smith@global-international.com", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d133b5d82238f7d4778c341cbe0bc969.asciidoc000066400000000000000000000007051506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-termvectors.asciidoc:141 [source, python] ---- resp = client.mtermvectors( docs=[ { "_index": "my-index-000001", "doc": { "message": "test test test" } }, { "_index": "my-index-000001", "doc": { "message": "Another test ..." 
} } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d13c7cdfc976e0c7b70737cd6a7becb8.asciidoc000066400000000000000000000015171506101734100270140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/rate-aggregation.asciidoc:411 [source, python] ---- resp = client.search( index="sales", size=0, runtime_mappings={ "price.adjusted": { "type": "double", "script": { "source": "emit(doc['price'].value * params.adjustment)", "params": { "adjustment": 0.9 } } } }, aggs={ "by_date": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "avg_price": { "rate": { "field": "price.adjusted" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d14fe5838fc02224f4b5ade2626d6026.asciidoc000066400000000000000000000003101506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/apis/explain.asciidoc:106 [source, python] ---- resp = client.ilm.explain_lifecycle( index="my-index-000001", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1b53bc9794e8609bd6f2245624bf977.asciidoc000066400000000000000000000013061506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/estimate-model-memory.asciidoc:60 [source, python] ---- resp = client.ml.estimate_model_memory( analysis_config={ "bucket_span": "5m", "detectors": [ { "function": "sum", "field_name": "bytes", "by_field_name": "status", "partition_field_name": "app" } ], "influencers": [ "source_ip", "dest_ip" ] }, overall_cardinality={ "status": 10, "app": 50 }, max_bucket_cardinality={ "source_ip": 300, "dest_ip": 30 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1ce66957f8bd84bf01c4bfaee3ba0c3.asciidoc000066400000000000000000000004731506101734100270570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:974 [source, python] ---- resp = client.eql.search( index="my-data-stream", filter_path="hits.events._source.@timestamp,hits.events._source.process.pid", query="\n process where process.name == \"regsvr32.exe\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1d8b6e642db1a7c70dbbf0fe6d8e92d.asciidoc000066400000000000000000000034331506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/sparse-vector-query.asciidoc:195 [source, python] ---- resp = client.search( index="my-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "multi_match": { "query": "How is the weather in Jamaica?", "fields": [ "title", "description" ] } } } }, { "standard": { "query": { "sparse_vector": { "field": "ml.inference.title_expanded.predicted_value", "inference_id": "my-elser-model", "query": "How is the weather in Jamaica?", "boost": 1 } } } }, { "standard": { "query": { "sparse_vector": { "field": "ml.inference.description_expanded.predicted_value", "inference_id": "my-elser-model", "query": "How is the weather in Jamaica?", "boost": 1 } } } } ], "window_size": 10, "rank_constant": 20 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1e0fee64389e7c8d4c092030626b61f.asciidoc000066400000000000000000000002711506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:215 [source, python] ---- resp = client.security.get_api_key( name="my-*", ) print(resp) ---- 
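// Editor's note (added, not autogenerated): the get_api_key call above matches every key named "my-*", active or invalidated. The sketch below shows one way the documented response shape ("api_keys" entries carrying "id", "name", and "invalidated") might be consumed; the endpoint and credentials are hypothetical placeholders, not values from this repository.

[source, python]
----
from elasticsearch import Elasticsearch

# Hypothetical connection details; substitute your own endpoint and API key.
client = Elasticsearch("https://localhost:9200", api_key="<your-api-key>")

resp = client.security.get_api_key(name="my-*")
for key in resp["api_keys"]:
    # Each entry reports whether the key has already been invalidated.
    state = "invalidated" if key["invalidated"] else "active"
    print(key["id"], key["name"], state)
----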
python-elasticsearch-9.1.1/docs/examples/d1ea13e1e8372cbf1480a414723ff55a.asciidoc000066400000000000000000000014571506101734100264700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-zoom.asciidoc:247 [source, python] ---- resp = client.security.create_api_key( name="connector_name-connector-api-key", role_descriptors={ "connector_name-connector-role": { "cluster": [ "monitor", "manage_connector" ], "indices": [ { "names": [ "index_name", ".search-acl-filter-index_name", ".elastic-connectors*" ], "privileges": [ "all" ], "allow_restricted_indices": False } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1ecce3632ae338b5e329b0e5ff3bed7.asciidoc000066400000000000000000000007111506101734100267730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:382 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_join_field": { "type": "join", "relations": { "question": "answer" }, "eager_global_ordinals": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d1fde25de1980b7e84fa878289fd0bcb.asciidoc000066400000000000000000000003551506101734100267430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:660 [source, python] ---- resp = client.search( index="my-index-000001", size="0", q="extra:test", filter_path="hits.total", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d23452f333b77bf5b463310e2a665560.asciidoc000066400000000000000000000010471506101734100261670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/run-as-privilege.asciidoc:51 [source, python] ---- resp = client.security.put_role( name="my_director", refresh=True, cluster=[ "manage" ], indices=[ { "names": [ "index1", "index2" ], "privileges": [ "manage" ] } ], run_as=[ "jacknich", "rdeniro" ], metadata={ "version": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d260225cf97e068ead2a8a6bb5aefd90.asciidoc000066400000000000000000000021301506101734100267110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1551 [source, python] ---- resp = client.indices.create( index="russian_example", settings={ "analysis": { "filter": { "russian_stop": { "type": "stop", "stopwords": "_russian_" }, "russian_keywords": { "type": "keyword_marker", "keywords": [ "пример" ] }, "russian_stemmer": { "type": "stemmer", "language": "russian" } }, "analyzer": { "rebuilt_russian": { "tokenizer": "standard", "filter": [ "lowercase", "russian_stop", "russian_keywords", "russian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d268aec16bb1eb909b634e856175094c.asciidoc000066400000000000000000000012451506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/stop-analyzer.asciidoc:133 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_stop_analyzer": { "type": "stop", "stopwords": [ "the", "over" ] } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_stop_analyzer", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp1) ---- 
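// Editor's note (added, not autogenerated): a minimal sketch of reading the token stream returned by the analyze call above. It assumes the my-index-000001 index with my_stop_analyzer from the preceding example already exists on the cluster `client` points at; because the analyzer drops the configured stopwords, "the" and "over" should be absent from the output.

[source, python]
----
resp = client.indices.analyze(
    index="my-index-000001",
    analyzer="my_stop_analyzer",
    text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.",
)
# Keep just the token text; start/end offsets and positions are also
# present on each entry if you need them.
print([t["token"] for t in resp["tokens"]])
----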
python-elasticsearch-9.1.1/docs/examples/d27591881da6f5767523b1beb233adc7.asciidoc000066400000000000000000000003661506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-azure.asciidoc:87 [source, python] ---- resp = client.snapshot.create_repository( name="my_backup", repository={ "type": "azure" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d2e7dead222cfbebbd2c21a7cc1893b4.asciidoc000066400000000000000000000003261506101734100271160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // api-conventions.asciidoc:260 [source, python] ---- resp = client.cluster.state( metric="metadata", filter_path="metadata.indices.*.system", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d2f52c106685bd8eab47e11d644d7a70.asciidoc000066400000000000000000000014521506101734100264750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/date.asciidoc:41 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "date": { "type": "date" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "date": "2015-01-01" }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "date": "2015-01-01T12:10:30Z" }, ) print(resp2) resp3 = client.index( index="my-index-000001", id="3", document={ "date": 1420070400001 }, ) print(resp3) resp4 = client.search( index="my-index-000001", sort={ "date": "asc" }, ) print(resp4) ---- python-elasticsearch-9.1.1/docs/examples/d2f6040c058a9555dfa62bb42d896a8f.asciidoc000066400000000000000000000005121506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:513 [source, python] ---- resp = client.search( index="my_queries1", query={ "percolate": { "field": "query", "document": { "my_field": "abcd" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d2f6fb271e97fde8685d7744e6718cc7.asciidoc000066400000000000000000000004371506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc:234 [source, python] ---- resp = client.index( index="text_payloads", id="1", document={ "text": "the|0 brown|3 fox|4 is|0 quick|10" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d305110a8cabfbebd1e38d85559d1023.asciidoc000066400000000000000000000034671506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:436 [source, python] ---- resp = client.indices.create( index="cjk_example", settings={ "analysis": { "filter": { "english_stop": { "type": "stop", "stopwords": [ "a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "no", "not", "of", "on", "or", "s", "such", "t", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with", "www" ] } }, "analyzer": { "rebuilt_cjk": { "tokenizer": "standard", "filter": [ "cjk_width", "lowercase", "cjk_bigram", "english_stop" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3263afc69b6f969b9bbd8738cd07b97.asciidoc000066400000000000000000000003021506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/post-pause-follow.asciidoc:73 [source, python] ---- resp = client.ccr.pause_follow( index="follower_index", ) print(resp) 
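# Editor's note (added, not autogenerated): pausing stops replication but
# keeps the follower index in place; replication can be restarted later
# with a separate call such as client.ccr.resume_follow(index="follower_index").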
---- python-elasticsearch-9.1.1/docs/examples/d3440ec81dde5f1a01c0206cb35e539c.asciidoc000066400000000000000000000006011506101734100265270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-reindex.asciidoc:106 [source, python] ---- resp = client.reindex( wait_for_completion=False, source={ "index": "test-data", "size": 50 }, dest={ "index": "azure-openai-embeddings", "pipeline": "azure_openai_embeddings_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d34946f59b6f938b141a37cb0b729308.asciidoc000066400000000000000000000005721506101734100262750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/geo-match-enrich-policy-type-ex.asciidoc:58 [source, python] ---- resp = client.enrich.put_policy( name="postal_policy", geo_match={ "indices": "postal_codes", "match_field": "location", "enrich_fields": [ "location", "postal_code" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d35a4d78a8b70c9e4d636efb0a92be9d.asciidoc000066400000000000000000000007761506101734100267430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/multi-terms-aggregation.asciidoc:61 [source, python] ---- resp = client.search( index="products", aggs={ "genres_and_products": { "multi_terms": { "terms": [ { "field": "genre" }, { "field": "product" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d35c8cf7a98b3f112e1de8797ec6689d.asciidoc000066400000000000000000000004271506101734100266200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/oidc-prepare-authentication-api.asciidoc:134 [source, python] ---- resp = client.security.oidc_prepare_authentication( iss="http://127.0.0.1:8080", login_hint="this_is_an_opaque_string", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3672a87a857ddb87519788236e57497.asciidoc000066400000000000000000000013671506101734100261770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-jinaai.asciidoc:232 [source, python] ---- resp = client.search( index="jinaai-index", retriever={ "text_similarity_reranker": { "retriever": { "standard": { "query": { "semantic": { "field": "content", "query": "who inspired taking care of the sea?" } } } }, "field": "content", "rank_window_size": 100, "inference_id": "jinaai-rerank", "inference_text": "who inspired taking care of the sea?" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d37b065a94b3ff65a2a8a204fc3b097c.asciidoc000066400000000000000000000003331506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1324 [source, python] ---- resp = client.eql.get_status( id="FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d37b0bda2bd24ab310e6b26708c7c6fb.asciidoc000066400000000000000000000014521506101734100266730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/movfn-aggregation.asciidoc:144 [source, python] ---- resp = client.search( size=0, aggs={ "my_date_histo": { "date_histogram": { "field": "date", "calendar_interval": "1M" }, "aggs": { "the_sum": { "sum": { "field": "price" } }, "the_movavg": { "moving_fn": { "buckets_path": "the_sum", "window": 10, "script": "return values.length > 0 ? 
values[0] : Double.NaN" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3a0f648d0fd50b54a4e9ebe363c5047.asciidoc000066400000000000000000000026511506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:221 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "linear": { "retrievers": [ { "retriever": { "standard": { "query": { "query_string": { "query": "(information retrieval) OR (artificial intelligence)", "default_field": "text" } } } }, "weight": 2, "normalizer": "minmax" }, { "retriever": { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } }, "weight": 1.5, "normalizer": "minmax" } ], "rank_window_size": 10 } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3a5b70d493e0bd77b3f2b586341c83c.asciidoc000066400000000000000000000010521506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1635 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", runtime={ "http.responses": { "type": "long", "script": "\n String response=dissect('%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{response} %{size}').extract(doc[\"message\"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n " } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3d117fec34301520ccdb26332e7c98a.asciidoc000066400000000000000000000007741506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/registered-domain.asciidoc:35 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "registered_domain": { "field": "fqdn", "target_field": "url" } } ] }, docs=[ { "_source": { "fqdn": "www.example.ac.uk" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3dccdb15822e971ededb9f6f7d8ada1.asciidoc000066400000000000000000000005311506101734100271450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:354 [source, python] ---- resp = client.search( query={ "query_string": { "fields": [ "content", "name.*^5" ], "query": "this AND that OR thus" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3e5edac5b461020017fd9d8ec7a91fa.asciidoc000066400000000000000000000013011506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/managing-roles.asciidoc:262 [source, python] ---- resp = client.security.put_role( name="clicks_admin", run_as=[ "clicks_watcher_1" ], cluster=[ "monitor" ], indices=[ { "names": [ "events-*" ], "privileges": [ "read" ], "field_security": { "grant": [ "category", "@timestamp", "message" ] }, "query": "{\"match\": {\"category\": \"click\"}}" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d3e9e1169c3514fd46e253cd8b5ae3cb.asciidoc000066400000000000000000000014321506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/predicate-tokenfilter.asciidoc:102 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "my_script_filter" ] } }, "filter": { "my_script_filter": { "type": "predicate_token_filter", "script": { "source": "\n 
token.type.contains(\"ALPHANUM\")\n " } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4158d486e7fee2702a14068b69e3b33.asciidoc000066400000000000000000000157201506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/downsampling-dsl.asciidoc:45 [source, python] ---- resp = client.indices.put_index_template( name="datastream_template", index_patterns=[ "datastream*" ], data_stream={}, template={ "lifecycle": { "downsampling": [ { "after": "1m", "fixed_interval": "1h" } ] }, "settings": { "index": { "mode": "time_series" } }, "mappings": { "properties": { "@timestamp": { "type": "date" }, "kubernetes": { "properties": { "container": { "properties": { "cpu": { "properties": { "usage": { "properties": { "core": { "properties": { "ns": { "type": "long" } } }, "limit": { "properties": { "pct": { "type": "float" } } }, "nanocores": { "type": "long", "time_series_metric": "gauge" }, "node": { "properties": { "pct": { "type": "float" } } } } } } }, "memory": { "properties": { "available": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } }, "majorpagefaults": { "type": "long" }, "pagefaults": { "type": "long", "time_series_metric": "gauge" }, "rss": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } }, "usage": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" }, "limit": { "properties": { "pct": { "type": "float" } } }, "node": { "properties": { "pct": { "type": "float" } } } } }, "workingset": { "properties": { "bytes": { "type": "long", "time_series_metric": "gauge" } } } } }, "name": { "type": "keyword" }, "start_time": { "type": "date" } } }, "host": { "type": "keyword", "time_series_dimension": True }, "namespace": { "type": "keyword", "time_series_dimension": True }, "node": { "type": "keyword", "time_series_dimension": True }, "pod": { "type": "keyword", "time_series_dimension": True } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4323be84152fa91abd76e966d4751dc.asciidoc000066400000000000000000000004541506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-api-key.asciidoc:474 [source, python] ---- resp = client.security.query_api_keys( query={ "term": { "name": { "value": "application-key-1" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d443db2755fde3b49ca3a9d296c4a96f.asciidoc000066400000000000000000000010111506101734100266430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc:120 [source, python] ---- resp = client.indices.create( index="delimited_payload", settings={ "analysis": { "analyzer": { "whitespace_delimited_payload": { "tokenizer": "whitespace", "filter": [ "delimited_payload" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d44ecc69090c0b2bc08a6cbc2e3467c5.asciidoc000066400000000000000000000006631506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significanttext-aggregation.asciidoc:153 [source, python] ---- resp = client.search( index="news", query={ "simple_query_string": { "query": "+elasticsearch +pozmantier" } }, source=[ "title", "source" ], highlight={ "fields": { "content": {} } }, ) print(resp) ---- 
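// Editor's note (added, not autogenerated): a sketch of consuming the query_api_keys response above. The documented response carries "total" (the number of keys matching the query) alongside the returned "api_keys" page, and the API accepts size/from for pagination; `client` is assumed to be an already-authenticated Elasticsearch instance.

[source, python]
----
resp = client.security.query_api_keys(
    query={"term": {"name": {"value": "application-key-1"}}},
    size=10,  # page size; combine with from_ or search_after for more pages
)
print(f"{resp['total']} matching key(s)")
for key in resp["api_keys"]:
    print(key["id"], key["name"])
----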
python-elasticsearch-9.1.1/docs/examples/d46e9739bbf25eb2f7225f58ab08b2a7.asciidoc000066400000000000000000000005021506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-complete-logout-api.asciidoc:89 [source, python] ---- resp = client.security.saml_complete_logout( realm="saml1", ids=[ "_1c368075e0b3..." ], content="PHNhbWxwOkxvZ291dFJlc3BvbnNlIHhtbG5zOnNhbWxwPSJ1cm46...", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d48b274a4b6098ffef0c016c6c945fb9.asciidoc000066400000000000000000000003561506101734100265770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-tokens.asciidoc:222 [source, python] ---- resp = client.security.get_token( grant_type="refresh_token", refresh_token="vLBPvmAB6KvwvJZr27cS", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d49318764244113ad2ac4cc0f06d77ec.asciidoc000066400000000000000000000010611506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:1034 [source, python] ---- resp = client.indices.create( index="image-index", mappings={ "properties": { "image-vector": { "type": "dense_vector", "dims": 3, "similarity": "l2_norm", "index_options": { "type": "hnsw", "m": 32, "ef_construction": 100 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4a41fb74b41b41a0ee114a2311f2815.asciidoc000066400000000000000000000006311506101734100263540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:245 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_age": "7d" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4b405ef0302227e050ac8f0e39068e1.asciidoc000066400000000000000000000006031506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/evaluate-dfanalytics.asciidoc:259 [source, python] ---- resp = client.ml.evaluate_data_frame( index="my_analytics_dest_index", evaluation={ "outlier_detection": { "actual_field": "is_outlier", "predicted_probability_field": "ml.outlier_score" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4b50ae96e541c0031264a10f6afccbf.asciidoc000066400000000000000000000003251506101734100266070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-a-data-stream.asciidoc:336 [source, python] ---- resp = client.indices.migrate_to_data_stream( name="my-time-series-data", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4cdcf01014c75693b080c778071c1b5.asciidoc000066400000000000000000000005351506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/stats-aggregation.asciidoc:102 [source, python] ---- resp = client.search( index="exams", size="0", aggs={ "grades_stats": { "stats": { "field": "grade", "missing": 0 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4d450f536d747d5ef5050d2d8c66f09.asciidoc000066400000000000000000000013641506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/validate.asciidoc:93 [source, python] ---- resp = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": { "_id": 1 } }, { "user": { "id": "kimchy" }, "@timestamp": "2099-11-15T14:12:12", "message": "trying out Elasticsearch" }, 
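# Editor's note (added, not autogenerated): entries in the bulk body
# alternate an action line ({"index": {...}}) with the document source
# that follows it, so the next two items describe document 2.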
{ "index": { "_id": 2 } }, { "user": { "id": "kimchi" }, "@timestamp": "2099-11-15T14:12:13", "message": "My user ID is similar to kimchy!" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4df39f72d3a3b80cd4042f6a21c3f19.asciidoc000066400000000000000000000004451506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/put-ip-location-database.asciidoc:40 [source, python] ---- resp = client.ingest.put_ip_location_database( id="my-database-2", configuration={ "name": "standard_location", "ipinfo": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4ef6ac034c4d42cb75d830ec69146e6.asciidoc000066400000000000000000000003451506101734100265620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc:75 [source, python] ---- resp = client.ccr.delete_auto_follow_pattern( name="my_auto_follow_pattern", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d4fb482a51d67a1af48e429af6019a46.asciidoc000066400000000000000000000012161506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/index-sorting.asciidoc:40 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "sort.field": [ "username", "date" ], "sort.order": [ "asc", "desc" ] } }, mappings={ "properties": { "username": { "type": "keyword", "doc_values": True }, "date": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d50b030edfe6d1128eb76aa5ba9d4e27.asciidoc000066400000000000000000000004571506101734100267110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/put-trained-models-aliases.asciidoc:99 [source, python] ---- resp = client.ml.put_trained_model_alias( model_id="flight-delay-prediction-1580004349800", model_alias="flight_delay_model", reassign=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5132d34ae922fa8e898889b627a1405.asciidoc000066400000000000000000000015141506101734100262760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/children-aggregation.asciidoc:95 [source, python] ---- resp = client.search( index="child_example", size="0", aggs={ "top-tags": { "terms": { "field": "tags.keyword", "size": 10 }, "aggs": { "to-answers": { "children": { "type": "answer" }, "aggs": { "top-names": { "terms": { "field": "owner.display_name.keyword", "size": 10 } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5242b1ab0213f25e5e0742032274ce6.asciidoc000066400000000000000000000012601506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/attachment.asciidoc:53 [source, python] ---- resp = client.ingest.put_pipeline( id="attachment", description="Extract attachment information", processors=[ { "attachment": { "field": "data", "remove_binary": True } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="attachment", document={ "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/d524db57be9f16abac5396895b9a2a59.asciidoc000066400000000000000000000002601506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/resolve.asciidoc:53 [source, python] ---- 
resp = client.indices.resolve_index( name="my-index-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d547d55efbf75374f6de1f224323bc73.asciidoc000066400000000000000000000016511506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geo-grid.asciidoc:39 [source, python] ---- resp = client.indices.create( index="geocells", mappings={ "properties": { "geocell": { "type": "geo_shape" } } }, ) print(resp) resp1 = client.ingest.put_pipeline( id="geotile2shape", description="translate rectangular z/x/y geotile to bounding box", processors=[ { "geo_grid": { "field": "geocell", "tile_type": "geotile" } } ], ) print(resp1) resp2 = client.ingest.put_pipeline( id="geohex2shape", description="translate H3 cell to polygon", processors=[ { "geo_grid": { "field": "geocell", "tile_type": "geohex", "target_format": "wkt" } } ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/d5533f08f5cc0479f07a46c761f0786b.asciidoc000066400000000000000000000006601506101734100263550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:327 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "counter": { "type": "integer", "store": False }, "tags": { "type": "keyword", "store": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d56a9d89282df56adbbc34b91390ac17.asciidoc000066400000000000000000000003431506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc:55 [source, python] ---- resp = client.ccr.get_auto_follow_pattern( name="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d59e9cc75814575aa5e275dbe262918c.asciidoc000066400000000000000000000004511506101734100264420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:119 [source, python] ---- resp = client.search( index="my_locations", query={ "geo_grid": { "location": { "geohash": "u0" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5abaf1fd26f0abf410dd8827d077bbf.asciidoc000066400000000000000000000003771506101734100270540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:173 [source, python] ---- resp = client.search( index="my-index-000001", query={ "match_all": {} }, sort=[ "my_id" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5bf9bc08f622ece98632a14a3982e27.asciidoc000066400000000000000000000005041506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:770 [source, python] ---- resp = client.search( query={ "match_all": {} }, script_fields={ "test1": { "script": "params['_source']['message']" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5d0ecf75843ddb5f92cfebd089e53e9.asciidoc000066400000000000000000000005031506101734100270220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:748 [source, python] ---- resp = client.reindex( source={ "index": "my-index-000001", "_source": [ "user.id", "_doc" ] }, dest={ "index": "my-new-index-000001" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5dcddc6398b473b6ad9bce5c6adf986.asciidoc000066400000000000000000000003351506101734100271040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
search/search-your-data/paginate-search-results.asciidoc:435 [source, python] ---- resp = client.search( scroll="1m", sort=[ "_doc" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d5ead6aacbfbedc8396f87bb34acc880.asciidoc000066400000000000000000000003471506101734100272270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/get-async-eql-search-api.asciidoc:20 [source, python] ---- resp = client.eql.get( id="FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d603e76ab70131f7ec6b08758f95a0e3.asciidoc000066400000000000000000000003021506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/recovery.asciidoc:148 [source, python] ---- resp = client.cat.recovery( v=True, h="i,s,t,ty,st,rep,snap,f,fp,b,bp", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d64679f8a53928fe9958dbe5ee5d9d13.asciidoc000066400000000000000000000013031506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:280 [source, python] ---- resp = client.search( index="my-index-000001", query={ "parent_id": { "type": "answer", "id": "1" } }, aggs={ "parents": { "terms": { "field": "my_join_field#question", "size": 10 } } }, runtime_mappings={ "parent": { "type": "long", "script": "\n emit(Integer.parseInt(doc['my_join_field#question'].value)) \n " } }, fields=[ { "field": "parent" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d64d509440afbed7cefd04b6898962eb.asciidoc000066400000000000000000000010531506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:100 [source, python] ---- resp = client.search( index="my_geoshapes", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_distance": { "distance": "200km", "pin.location": { "lat": 40, "lon": -70 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d66e2b4d1931bf88c72e74670156e43f.asciidoc000066400000000000000000000004451506101734100263570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-api.asciidoc:332 [source, python] ---- resp = client.search( index="my-index-000001", track_total_hits=100, query={ "match": { "user.id": "elkbee" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d681508a745b2bc777d47ba606d24224.asciidoc000066400000000000000000000002341506101734100262540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/fielddata.asciidoc:158 [source, python] ---- resp = client.cat.fielddata( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d681b643da0d7f0a384f627b6d56111b.asciidoc000066400000000000000000000004631506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/dissect-syntax.asciidoc:89 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "message": { "type": "wildcard" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d690a6af462c70a783625a323e11c72c.asciidoc000066400000000000000000000004711506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:187 [source, python] ---- resp = client.indices.create( index="test-index", settings={ "number_of_shards": 1, "number_of_replicas": 1, "index.lifecycle.name": "my_policy" }, 
) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d69bd36335774c8ae1286cee21310241.asciidoc000066400000000000000000000010521506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-api-key.asciidoc:72 [source, python] ---- resp = client.security.put_role( name="remote-search", remote_indices=[ { "clusters": [ "my_remote_cluster" ], "names": [ "target-index" ], "privileges": [ "read", "read_cross_cluster", "view_index_metadata" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d69cf7c82602431d9e339583e7dfb988.asciidoc000066400000000000000000000020221506101734100263740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/configuring.asciidoc:10 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "std_english": { "type": "standard", "stopwords": "_english_" } } } }, mappings={ "properties": { "my_text": { "type": "text", "analyzer": "standard", "fields": { "english": { "type": "text", "analyzer": "std_english" } } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", field="my_text", text="The old brown cow", ) print(resp1) resp2 = client.indices.analyze( index="my-index-000001", field="my_text.english", text="The old brown cow", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/d6a21afa4a94b9baa734eac430940bcf.asciidoc000066400000000000000000000003021506101734100267460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connectors-api.asciidoc:95 [source, python] ---- resp = client.connector.list( from_="0", size="2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d6a4548b29e939fb197189c20c7c016f.asciidoc000066400000000000000000000005661506101734100263660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/elastic-infer-service.asciidoc:115 [source, python] ---- resp = client.inference.put( task_type="chat_completion", inference_id="chat-completion-endpoint", inference_config={ "service": "elastic", "service_settings": { "model_id": "model-1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d70f55cd29cdb2dcd775ffa9e23ff393.asciidoc000066400000000000000000000011151506101734100270150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/max-aggregation.asciidoc:52 [source, python] ---- resp = client.search( index="sales", size=0, runtime_mappings={ "price.adjusted": { "type": "double", "script": "\n double price = doc['price'].value;\n if (doc['promoted'].value) {\n price *= 0.8;\n }\n emit(price);\n " } }, aggs={ "max_price": { "max": { "field": "price.adjusted" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7141bd4d0db964f5cc4a872ad79dce9.asciidoc000066400000000000000000000002531506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // features/apis/reset-features-api.asciidoc:20 [source, python] ---- resp = client.features.reset_features() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7348119df9f89a556a7b767d5298c7e.asciidoc000066400000000000000000000013051506101734100264060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geoline-aggregation.asciidoc:218 [source, python] ---- resp = client.search( index="tour", filter_path="aggregations", aggregations={ "path": { "terms": { "field": 
"city" }, "aggregations": { "museum_tour": { "geo_line": { "point": { "field": "location" }, "sort": { "field": "@timestamp" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7717318d93d0a1f3ad049f9c6604417.asciidoc000066400000000000000000000013461506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/standard-tokenizer.asciidoc:139 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "standard", "max_token_length": 5 } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="The 2 QUICK Brown-Foxes jumped over the lazy dog's bone.", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/d775836a0d7abecc6637aa988f204c30.asciidoc000066400000000000000000000007141506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:224 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "fullname": "John Doe", "text": "test test test " }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", refresh="wait_for", document={ "fullname": "Jane Doe", "text": "Another test ..." }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/d7898526d239d2aea83727fb982f8f77.asciidoc000066400000000000000000000002231506101734100264030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/refresh.asciidoc:119 [source, python] ---- resp = client.indices.refresh() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7919fb6f4d02dde1390775eb8365b79.asciidoc000066400000000000000000000004521506101734100264470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/text.asciidoc:335 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "my_field": { "type": "text", "fielddata": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7a55a7c491e97079e429483085f1d58.asciidoc000066400000000000000000000007031506101734100262320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:60 [source, python] ---- resp = client.indices.put_index_template( name="dsl-data-stream-template", index_patterns=[ "dsl-data-stream*" ], data_stream={}, priority=500, template={ "settings": { "index.lifecycle.name": "pre-dsl-ilm-policy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7a5b0159ffdcdd1ab9078b38829a08b.asciidoc000066400000000000000000000016451506101734100266500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/semantic-query.asciidoc:87 [source, python] ---- resp = client.search( index="my-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "term": { "text": "shoes" } } } }, { "standard": { "query": { "semantic": { "field": "semantic_field", "query": "shoes" } } } } ], "rank_window_size": 50, "rank_constant": 20 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7ae456f119246e95f2f4c37e7544b8c.asciidoc000066400000000000000000000003761506101734100264510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/start-datafeed.asciidoc:115 [source, python] ---- resp = client.ml.start_datafeed( 
datafeed_id="datafeed-low_request_rate", start="2019-04-07T18:22:16Z", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7b61bfb6adb22986a43388b823894cc.asciidoc000066400000000000000000000007111506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:4 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="cohere_embeddings", inference_config={ "service": "cohere", "service_settings": { "api_key": "", "model_id": "embed-english-v3.0", "embedding_type": "byte" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7d92816cac64b7c70d72b0000eeeeea.asciidoc000066400000000000000000000007561506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/field-level-security.asciidoc:77 [source, python] ---- resp = client.security.put_role( name="test_role3", indices=[ { "names": [ "*" ], "privileges": [ "read" ], "field_security": { "grant": [ "customer.handle" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7f42d1b906dc406be1819d17c625d5f.asciidoc000066400000000000000000000010761506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/filter-aggregation.asciidoc:83 [source, python] ---- resp = client.search( index="sales", size="0", filter_path="aggregations", aggs={ "t_shirts": { "filter": { "term": { "type": "t-shirt" } }, "aggs": { "avg_price": { "avg": { "field": "price" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d7fe687201ac87b307cd06ed015dd317.asciidoc000066400000000000000000000004571506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:288 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "user_id": { "type": "keyword", "ignore_above": 100 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d803ed00d8f45f81c33e415e1c1ecb8c.asciidoc000066400000000000000000000007551506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:642 [source, python] ---- resp = client.reindex( source={ "index": "my-data-stream", "query": { "range": { "@timestamp": { "gte": "now-7d/d", "lte": "now/d" } } } }, dest={ "index": "new-data-stream", "op_type": "create" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d80ac403d8d936ca9dec185c7da13f2f.asciidoc000066400000000000000000000004621506101734100267210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/apis/create-stored-script-api.asciidoc:17 [source, python] ---- resp = client.put_script( id="my-stored-script", script={ "lang": "painless", "source": "Math.log(_score * 2) + params['my_modifier']" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d8310e5606c61e7a6e64a90838b1a830.asciidoc000066400000000000000000000020001506101734100262440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/parent-aggregation.asciidoc:59 [source, python] ---- resp = client.index( index="parent_example", id="2", routing="1", document={ "join": { "name": "answer", "parent": "1" }, "owner": { "location": "Norfolk, United Kingdom", "display_name": "Sam", "id": 48 }, "body": "Unfortunately you're pretty much limited to FTP...", "creation_date": "2009-05-04T13:45:37.030" }, ) 
print(resp) resp1 = client.index( index="parent_example", id="3", routing="1", refresh=True, document={ "join": { "name": "answer", "parent": "1" }, "owner": { "location": "Norfolk, United Kingdom", "display_name": "Troll", "id": 49 }, "body": "Use Linux...", "creation_date": "2009-05-05T13:45:37.030" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/d8496fa0e5a394fd758617ed6a6c956f.asciidoc000066400000000000000000000007041506101734100265370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:373 [source, python] ---- resp = client.search( index="my-index-000001", query={ "percolate": { "field": "query", "document": { "message": "The quick brown fox jumps over the lazy dog" } } }, highlight={ "fields": { "message": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d84a861ce563508aeaaf30a9dd84b5cf.asciidoc000066400000000000000000000007601506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:271 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "hot": { "actions": { "rollover": { "max_age": "7d", "max_size": "100gb", "min_docs": 1000 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d851282dba548251d10db5954a339307.asciidoc000066400000000000000000000007151506101734100261730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/paginate-search-results.asciidoc:136 [source, python] ---- resp = client.search( index="twitter", query={ "match": { "title": "elasticsearch" } }, search_after=[ 1463538857, "654323" ], sort=[ { "date": "asc" }, { "tie_breaker_id": "asc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d870d5bd1f97fc75872a298fcddec513.asciidoc000066400000000000000000000103661506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // text-structure/apis/find-structure.asciidoc:101 [source, python] ---- resp = client.text_structure.find_structure( text_files=[ { "name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561 }, { "name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482 }, { "name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604 }, { "name": "Dune Messiah", "author": "Frank Herbert", "release_date": "1969-10-15", "page_count": 331 }, { "name": "Children of Dune", "author": "Frank Herbert", "release_date": "1976-04-21", "page_count": 408 }, { "name": "God Emperor of Dune", "author": "Frank Herbert", "release_date": "1981-05-28", "page_count": 454 }, { "name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471 }, { "name": "Pandora's Star", "author": "Peter F. 
Hamilton", "release_date": "2004-03-02", "page_count": 768 }, { "name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585 }, { "name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613 }, { "name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324 }, { "name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328 }, { "name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227 }, { "name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268 }, { "name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224 }, { "name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208 }, { "name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275 }, { "name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180 }, { "name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470 }, { "name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271 }, { "name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311 }, { "name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335 }, { "name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304 }, { "name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d87175daed2327565d4325528c6d8b38.asciidoc000066400000000000000000000002521506101734100262740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:235 [source, python] ---- resp = client.get( index="my-index-000001", id="0", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d87cfcc0a297f75ffe646b2e61940d14.asciidoc000066400000000000000000000007601506101734100265770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/uppercase-tokenfilter.asciidoc:92 [source, python] ---- resp = client.indices.create( index="uppercase_example", settings={ "analysis": { "analyzer": { "whitespace_uppercase": { "tokenizer": "whitespace", "filter": [ "uppercase" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d880630b6f7dc634c4078293f9cd3d80.asciidoc000066400000000000000000000021051506101734100263530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:716 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "size": 2, "sources": [ { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } }, { "product": { "terms": { "field": "product", "order": "asc" } } } ], "after": { "date": 1494288000000, "product": "mad max" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d88f883ed2fb8be35cd3e72ddffcf4ef.asciidoc000066400000000000000000000013121506101734100272460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/length-tokenfilter.asciidoc:149 
[source, python] ---- resp = client.indices.create( index="length_custom_example", settings={ "analysis": { "analyzer": { "whitespace_length_2_to_10_char": { "tokenizer": "whitespace", "filter": [ "length_2_to_10_char" ] } }, "filter": { "length_2_to_10_char": { "type": "length", "min": 2, "max": 10 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d89d36741d906a71eca6c144e8d83889.asciidoc000066400000000000000000000002731506101734100263720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/tasks.asciidoc:243 [source, python] ---- resp = client.tasks.cancel( task_id="oTUltX4IQMOUUVeiohTt8A:12345", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d8a82511cb94f49b4fe4828fee3ba074.asciidoc000066400000000000000000000003301506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/circuit-breaker-errors.asciidoc:63 [source, python] ---- resp = client.cat.nodes( v=True, h="name,node*,heap*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d8c053ee26c1533ce936ec81101d8e1b.asciidoc000066400000000000000000000003211506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/get-ip-location-database.asciidoc:61 [source, python] ---- resp = client.ingest.get_ip_location_database( id="my-database-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d8c401a5b7359ec65947b9f35ecf6927.asciidoc000066400000000000000000000015171506101734100264540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/ngram-tokenizer.asciidoc:220 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "ngram", "min_gram": 3, "max_gram": 3, "token_chars": [ "letter", "digit" ] } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="2 Quick Foxes.", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/d8ea6a1a1c546bf29f65f8c65439b156.asciidoc000066400000000000000000000007341506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:190 [source, python] ---- resp = client.indices.create( index="byte-image-index", mappings={ "properties": { "byte-image-vector": { "type": "dense_vector", "element_type": "byte", "dims": 2 }, "title": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d8fa7ca2ec8dbfa034603ea566e33f5b.asciidoc000066400000000000000000000021331506101734100267710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/filters-aggregation.asciidoc:208 [source, python] ---- resp = client.search( index="sales", size="0", filter_path="aggregations", aggs={ "the_filter": { "filters": { "keyed": False, "filters": { "t-shirt": { "term": { "type": "t-shirt" } }, "hat": { "term": { "type": "hat" } } } }, "aggs": { "avg_price": { "avg": { "field": "price" } }, "sort_by_avg_price": { "bucket_sort": { "sort": { "avg_price": "asc" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d93d52b6057a7aff3d0766ca44c505e0.asciidoc000066400000000000000000000010771506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:206 [source, python] ---- resp = client.cluster.put_component_template( 
name="my-aliases", template={ "aliases": { "my-alias": {} } }, ) print(resp) resp1 = client.indices.put_index_template( name="my-index-template", index_patterns=[ "my-index-*" ], composed_of=[ "my-aliases", "my-mappings", "my-settings" ], template={ "aliases": { "yet-another-alias": {} } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/d94f666616dea141dcb7aaf08a35bc10.asciidoc000066400000000000000000000006241506101734100266250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/keep-types-tokenfilter.asciidoc:94 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "keep_types", "types": [ "" ], "mode": "exclude" } ], text="1 quick fox 2 lazy dogs", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d952ac7c73219d8cabc080679e035514.asciidoc000066400000000000000000000007761506101734100263520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/search.asciidoc:34 [source, python] ---- resp = client.search( index="my-index", knn={ "field": "my_embeddings.predicted_value", "k": 10, "num_candidates": 100, "query_vector_builder": { "text_embedding": { "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", "model_text": "the query string" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d979f934af0992fb8c8596beff80b638.asciidoc000066400000000000000000000004631506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:530 [source, python] ---- resp = client.search( source=[ "obj1.*", "obj2.*" ], query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d983c1ea730eeabac9e914656d7c9be2.asciidoc000066400000000000000000000021241506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1263 [source, python] ---- resp = client.indices.create( index="latvian_example", settings={ "analysis": { "filter": { "latvian_stop": { "type": "stop", "stopwords": "_latvian_" }, "latvian_keywords": { "type": "keyword_marker", "keywords": [ "piemērs" ] }, "latvian_stemmer": { "type": "stemmer", "language": "latvian" } }, "analyzer": { "rebuilt_latvian": { "tokenizer": "standard", "filter": [ "lowercase", "latvian_stop", "latvian_keywords", "latvian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d98fb2ff2cdd154dff4a576430755d98.asciidoc000066400000000000000000000015761506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1122 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "timestamp": { "type": "date" }, "temperature": { "type": "long" }, "voltage": { "type": "double" }, "node": { "type": "keyword" }, "voltage_corrected": { "type": "double", "on_script_error": "fail", "script": { "source": "\n emit(doc['voltage'].value * params['multiplier'])\n ", "params": { "multiplier": 4 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d9a1ad1c5746b75972c74dd4d3a3d623.asciidoc000066400000000000000000000010221506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/parent-join.asciidoc:442 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_join_field": { "type": 
"join", "relations": { "question": [ "answer", "comment" ], "answer": "vote" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d9de409a4a197ce7cbe3714e07155d34.asciidoc000066400000000000000000000014141506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/engine.asciidoc:28 [source, python] ---- resp = client.search( query={ "function_score": { "query": { "match": { "body": "foo" } }, "functions": [ { "script_score": { "script": { "source": "pure_df", "lang": "expert_scripts", "params": { "field": "body", "term": "foo" } } } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/d9e0cba8e150681d861f5fd1545514e2.asciidoc000066400000000000000000000005511506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:513 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT YEAR(release_date) AS year FROM library WHERE page_count > ? AND author = ? GROUP BY year HAVING COUNT(*) > ?", params=[ 300, "Frank Herbert", 0 ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/da0fe1316e5b8fd68e2a8525bcd8b0f6.asciidoc000066400000000000000000000010401506101734100267130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/recipes/scoring.asciidoc:169 [source, python] ---- resp = client.search( query={ "bool": { "must": { "match": { "body": "elasticsearch" } }, "should": { "rank_feature": { "field": "pagerank", "saturation": { "pivot": 10 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/da18bae37cda566c0254b30c15221b01.asciidoc000066400000000000000000000004151506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-service-token-caches.asciidoc:61 [source, python] ---- resp = client.security.clear_cached_service_tokens( namespace="elastic", service="fleet-server", name="token1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/da24c13eee8c9aeae9a23faf80489e31.asciidoc000066400000000000000000000011471506101734100270040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:177 [source, python] ---- resp = client.indices.delete( index="my-index", ) print(resp) resp1 = client.reindex( source={ "index": "restored-my-index" }, dest={ "index": "my-index" }, ) print(resp1) resp2 = client.indices.delete_data_stream( name="logs-my_app-default", ) print(resp2) resp3 = client.reindex( source={ "index": "restored-logs-my_app-default" }, dest={ "index": "logs-my_app-default", "op_type": "create" }, ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/da3f280bc65b581fb3097be768061bee.asciidoc000066400000000000000000000004031506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-prepare-authentication-api.asciidoc:96 [source, python] ---- resp = client.security.saml_prepare_authentication( acs="https://kibana.org/api/security/saml/callback", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/da8db0769dff7305f178c12b1111bc99.asciidoc000066400000000000000000000005411506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/simple-query-string-query.asciidoc:262 [source, python] ---- resp = client.search( query={ "simple_query_string": { "query": "this is a test", "fields": [ "subject^3", "message" ] } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/da90e457e2a34fe47dd82a0a2f336095.asciidoc000066400000000000000000000005101506101734100264700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/range-enrich-policy-type-ex.asciidoc:33 [source, python] ---- resp = client.index( index="networks", id="1", refresh="wait_for", document={ "range": "10.100.0.0/16", "name": "production", "department": "OPS" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/daae2e6acebc84e537764f4ba07f2e6e.asciidoc000066400000000000000000000003601506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // path-settings-overview.asciidoc:75 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.exclude._name": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dabb159e0b3456024889fb9754a10655.asciidoc000066400000000000000000000004531506101734100262630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:76 [source, python] ---- resp = client.indices.create( index="example", mappings={ "properties": { "geometry": { "type": "shape" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dabcf0bead37cae1d3e5d2813fd3ccfe.asciidoc000066400000000000000000000004211506101734100273340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/ip.asciidoc:143 [source, python] ---- resp = client.search( index="my-index-000001", query={ "query_string": { "query": "ip_addr:\"2001:db8::/48\"" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dac8ec8547bc446637fd97d9fa872f4f.asciidoc000066400000000000000000000060541506101734100267050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/put-dfanalytics.asciidoc:822 [source, python] ---- resp = client.ml.put_data_frame_analytics( id="flight_prices", source={ "index": [ "kibana_sample_data_flights" ] }, dest={ "index": "kibana_sample_flight_prices" }, analysis={ "regression": { "dependent_variable": "AvgTicketPrice", "num_top_feature_importance_values": 2, "feature_processors": [ { "frequency_encoding": { "field": "DestWeather", "feature_name": "DestWeather_frequency", "frequency_map": { "Rain": 0.14604811155570188, "Heavy Fog": 0.14604811155570188, "Thunder & Lightning": 0.14604811155570188, "Cloudy": 0.14604811155570188, "Damaging Wind": 0.14604811155570188, "Hail": 0.14604811155570188, "Sunny": 0.14604811155570188, "Clear": 0.14604811155570188 } } }, { "target_mean_encoding": { "field": "DestWeather", "feature_name": "DestWeather_targetmean", "target_map": { "Rain": 626.5588814585794, "Heavy Fog": 626.5588814585794, "Thunder & Lightning": 626.5588814585794, "Hail": 626.5588814585794, "Damaging Wind": 626.5588814585794, "Cloudy": 626.5588814585794, "Clear": 626.5588814585794, "Sunny": 626.5588814585794 }, "default_value": 624.0249512020454 } }, { "one_hot_encoding": { "field": "DestWeather", "hot_map": { "Rain": "DestWeather_Rain", "Heavy Fog": "DestWeather_Heavy Fog", "Thunder & Lightning": "DestWeather_Thunder & Lightning", "Cloudy": "DestWeather_Cloudy", "Damaging Wind": "DestWeather_Damaging Wind", "Hail": "DestWeather_Hail", "Clear": "DestWeather_Clear", "Sunny": "DestWeather_Sunny" } } } ] } }, analyzed_fields={ "includes": [ "AvgTicketPrice", "Cancelled", "DestWeather", "FlightDelayMin", "DistanceMiles" ] }, model_memory_limit="30mb", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/dad2d4add751fde5c39475ca709cc14b.asciidoc000066400000000000000000000004211506101734100267700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/allocation/filtering.asciidoc:54 [source, python] ---- resp = client.indices.put_settings( index="test", settings={ "index.routing.allocation.include.size": "big,medium" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dadb69a225778ecd6528924c0aa029bb.asciidoc000066400000000000000000000012661506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:85 [source, python] ---- resp = client.indices.create( index="image-index", mappings={ "properties": { "image-vector": { "type": "dense_vector", "dims": 3, "similarity": "l2_norm" }, "title-vector": { "type": "dense_vector", "dims": 5, "similarity": "l2_norm" }, "title": { "type": "text" }, "file-type": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dae57cf7df18adb4dc64426eb159733a.asciidoc000066400000000000000000000010651506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/percentile-aggregation.asciidoc:370 [source, python] ---- resp = client.search( index="latency", size=0, aggs={ "load_time_outlier": { "percentiles": { "field": "load_time", "percents": [ 95, 99, 99.9 ], "hdr": { "number_of_significant_value_digits": 3 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/daf5631eba5285f1b929d5d8d8dc0d50.asciidoc000066400000000000000000000013301506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/uaxurlemail-tokenizer.asciidoc:95 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "my_tokenizer" } }, "tokenizer": { "my_tokenizer": { "type": "uax_url_email", "max_token_length": 5 } } } }, ) print(resp) resp1 = client.indices.analyze( index="my-index-000001", analyzer="my_analyzer", text="john.smith@global-international.com", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/db19cc7a26ca80106d86d688f4be67a8.asciidoc000066400000000000000000000003151506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/stop-dfanalytics.asciidoc:75 [source, python] ---- resp = client.ml.stop_data_frame_analytics( id="loganalytics", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/db773f690edf659ac9b044dc854c77eb.asciidoc000066400000000000000000000032321506101734100266640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-vector-tile-api.asciidoc:671 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "geo_point" }, "name": { "type": "keyword" }, "price": { "type": "long" }, "included": { "type": "boolean" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": "1" } }, { "location": "POINT (4.912350 52.374081)", "name": "NEMO Science Museum", "price": 1750, "included": True }, { "index": { "_id": "2" } }, { "location": "POINT (4.901618 52.369219)", "name": "Museum Het Rembrandthuis", "price": 1500, "included": False }, { "index": { "_id": "3" } }, { "location": "POINT (4.914722 52.371667)", "name": "Nederlands Scheepvaartmuseum", "price": 1650, "included": 
True }, { "index": { "_id": "4" } }, { "location": "POINT (4.914722 52.371667)", "name": "Amsterdam Centre for Architecture", "price": 0, "included": True } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/db8710a9793ae0817a45892d33468160.asciidoc000066400000000000000000000003231506101734100261250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/diskusage.asciidoc:75 [source, python] ---- resp = client.indices.disk_usage( index="my-index-000001", run_expensive_tasks=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/db879dcf70abc4a9a14063a9a2d8d6f5.asciidoc000066400000000000000000000035171506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geohashgrid-aggregation.asciidoc:27 [source, python] ---- resp = client.indices.create( index="museums", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.bulk( index="museums", refresh=True, operations=[ { "index": { "_id": 1 } }, { "location": "POINT (4.912350 52.374081)", "name": "NEMO Science Museum" }, { "index": { "_id": 2 } }, { "location": "POINT (4.901618 52.369219)", "name": "Museum Het Rembrandthuis" }, { "index": { "_id": 3 } }, { "location": "POINT (4.914722 52.371667)", "name": "Nederlands Scheepvaartmuseum" }, { "index": { "_id": 4 } }, { "location": "POINT (4.405200 51.222900)", "name": "Letterenhuis" }, { "index": { "_id": 5 } }, { "location": "POINT (2.336389 48.861111)", "name": "Musée du Louvre" }, { "index": { "_id": 6 } }, { "location": "POINT (2.327000 48.860000)", "name": "Musée d'Orsay" } ], ) print(resp1) resp2 = client.search( index="museums", size="0", aggregations={ "large-grid": { "geohash_grid": { "field": "location", "precision": 3 } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/db9a8e3edee7c9a96ea0875fd4bbaa69.asciidoc000066400000000000000000000002551506101734100271630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // monitoring/collecting-monitoring-data.asciidoc:45 [source, python] ---- resp = client.cluster.get_settings() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dbc50b8c934171e94604575a8b36f349.asciidoc000066400000000000000000000003261506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/update-settings.asciidoc:151 [source, python] ---- resp = client.indices.forcemerge( index="my-index-000001", max_num_segments="5", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dbcd8892dd01c43d5a60c94173574faf.asciidoc000066400000000000000000000014721506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/range-field-note.asciidoc:12 [source, python] ---- resp = client.indices.create( index="range_index", settings={ "number_of_shards": 2 }, mappings={ "properties": { "expected_attendees": { "type": "integer_range" }, "time_frame": { "type": "date_range", "format": "yyyy-MM-dd||epoch_millis" } } }, ) print(resp) resp1 = client.index( index="range_index", id="1", refresh=True, document={ "expected_attendees": { "gte": 10, "lte": 20 }, "time_frame": { "gte": "2019-10-28", "lte": "2019-11-04" } }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/dbd1b930782d34d7396fdb2db1216c0d.asciidoc000066400000000000000000000004421506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/ids-query.asciidoc:13 [source, python] ---- resp = client.search( 
query={ "ids": { "values": [ "1", "4", "100" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dbdd58cdeac9ef20b42ff73e4864e697.asciidoc000066400000000000000000000003151506101734100270220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:251 [source, python] ---- resp = client.indices.get_field_mapping( index="_all", fields="*.id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dbf93d02ab86a09929a21232b19709cc.asciidoc000066400000000000000000000003551506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/stop-trained-model-deployment.asciidoc:73 [source, python] ---- resp = client.ml.stop_trained_model_deployment( model_id="my_model_for_search", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dbf9abc37899352751dab0ede62af2fd.asciidoc000066400000000000000000000004501506101734100270030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:121 [source, python] ---- resp = client.security.invalidate_token( token="dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dc33160f4087443f867080a8f5b2cfbd.asciidoc000066400000000000000000000004661506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:176 [source, python] ---- resp = client.esql.query( format="json", query="\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", columnar=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dc3b7603e7d688106acb804059af7834.asciidoc000066400000000000000000000004161506101734100263450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:496 [source, python] ---- resp = client.search( source=False, query={ "match": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dc468865da947b4a9136a5b92878d918.asciidoc000066400000000000000000000006741506101734100263240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-update-api-keys.asciidoc:131 [source, python] ---- resp = client.security.create_api_key( name="my-other-api-key", metadata={ "application": "my-application", "environment": { "level": 2, "trusted": True, "tags": [ "dev", "staging" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dc4dcfeae8a5f248639335c2c9809549.asciidoc000066400000000000000000000003521506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:17 [source, python] ---- resp = client.indices.analyze( tokenizer="path_hierarchy", text="/one/two/three", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dc8c94c9bef1f879282caea5c406f36e.asciidoc000066400000000000000000000004421506101734100267410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:189 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", filter=[ "lowercase" ], char_filter=[ "html_strip" ], text="this is a test", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/dcc02ad69da0a5aa10c4e53b34be8ec0.asciidoc000066400000000000000000000005021506101734100270150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-get.asciidoc:16 [source, python] ---- resp = client.mget( docs=[ { "_index": "my-index-000001", "_id": "1" }, { "_index": "my-index-000001", "_id": "2" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dcee24dba43050e4b01b6e3a3211ce09.asciidoc000066400000000000000000000007021506101734100265740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1281 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "@timestamp": { "format": "strict_date_optional_time||epoch_second", "type": "date" }, "message": { "type": "wildcard" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dcf82f3aacae49c0bb4ccbc673f13e9f.asciidoc000066400000000000000000000017221506101734100272140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:1202 [source, python] ---- resp = client.search( index="my-index", size=10, query={ "script_score": { "query": { "knn": { "query_vector": [ 0.04283529, 0.85670587, -0.51402352, 0 ], "field": "my_int4_vector", "num_candidates": 20 } }, "script": { "source": "(dotProduct(params.queryVector, 'my_int4_vector') + 1.0)", "params": { "queryVector": [ 0.04283529, 0.85670587, -0.51402352, 0 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dcfa7f479a33f459a2d222a92e651451.asciidoc000066400000000000000000000020051506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-roles.asciidoc:126 [source, python] ---- resp = client.security.put_role( name="my_admin_role", description="Grants full access to all management features within the cluster.", cluster=[ "all" ], indices=[ { "names": [ "index1", "index2" ], "privileges": [ "all" ], "field_security": { "grant": [ "title", "body" ] }, "query": "{\"match\": {\"title\": \"foo\"}}" } ], applications=[ { "application": "myapp", "privileges": [ "admin", "read" ], "resources": [ "*" ] } ], run_as=[ "other_user" ], metadata={ "version": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd0b196a099e1cca08c5ce4dd74e935a.asciidoc000066400000000000000000000004531506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/example-watches/example-watch-clusterstatus.asciidoc:27 [source, python] ---- resp = client.watcher.put_watch( id="cluster_health_watch", trigger={ "schedule": { "interval": "10s" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd16c9c981551c9da47ebb5ef5105fa0.asciidoc000066400000000000000000000027601506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:535 [source, python] ---- resp = client.indices.update_aliases( actions=[ { "add": { "index": ".reindexed-v9-ml-anomalies-custom-example", "alias": ".ml-anomalies-example1", "filter": { "term": { "job_id": { "value": "example1" } } }, "is_hidden": True } }, { "add": { "index": ".reindexed-v9-ml-anomalies-custom-example", "alias": ".ml-anomalies-example2", "filter": { "term": { "job_id": { "value": "example2" } } }, "is_hidden": True } }, { "remove": { "index": ".ml-anomalies-custom-example", "aliases": ".ml-anomalies-*" } }, { "remove_index": { "index": 
".ml-anomalies-custom-example" } }, { "add": { "index": ".reindexed-v9-ml-anomalies-custom-example", "alias": ".ml-anomalies-custom-example", "is_hidden": True } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd1a25d821d0c8deaeaa9c8083152a54.asciidoc000066400000000000000000000002551506101734100266210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/grok.asciidoc:293 [source, python] ---- resp = client.ingest.processor_grok( s=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd3b263e9fa4226e59bedfc957d399d2.asciidoc000066400000000000000000000003501506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/getting-started.asciidoc:22 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT * FROM library WHERE release_date < '2000-01-01'", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd3ee00ab2af607b32532180d60a41d4.asciidoc000066400000000000000000000012551506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/snowball-tokenfilter.asciidoc:19 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "standard", "filter": [ "lowercase", "my_snow" ] } }, "filter": { "my_snow": { "type": "snowball", "language": "English" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd4f051ab62f0507e3b6e3d6f333e85f.asciidoc000066400000000000000000000002611506101734100265530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-component-template.asciidoc:101 [source, python] ---- resp = client.cluster.get_component_template() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd71b0c9f9197684ff29c61062c55660.asciidoc000066400000000000000000000002471506101734100263030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-settings.asciidoc:38 [source, python] ---- resp = client.security.get_settings() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd7814258121d3c2e576a7f00469d7e3.asciidoc000066400000000000000000000010031506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:197 [source, python] ---- resp = client.ingest.put_pipeline( id="mistral_embeddings_pipeline", processors=[ { "inference": { "model_id": "mistral_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dd792bb53703a57f9207e36d16e26255.asciidoc000066400000000000000000000025021506101734100262640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:1162 [source, python] ---- resp = client.bulk( index="my-index-000001", refresh=True, operations=[ { "index": {} }, { "timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a" }, { "index": {} }, { "timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b" }, { "index": {} }, { "timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a" }, { "index": {} }, { "timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b" }, { "index": {} }, { "timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c" }, { "index": {} }, { "timestamp": 1516297294000, "temperature": 202, 
"voltage": 4, "node": "c" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dda949d20d07a9edbe64cefc623df945.asciidoc000066400000000000000000000004121506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:472 [source, python] ---- resp = client.indices.put_mapping( index="my_test_scores", properties={ "total_score": { "type": "long" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ddcfa47381d47078dbec651e31b69949.asciidoc000066400000000000000000000004341506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/detect-threats-with-eql.asciidoc:209 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n library where process.name == \"regsvr32.exe\" and dll.name == \"scrobj.dll\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dddb6a6ebd145f8411c5b4910d332f87.asciidoc000066400000000000000000000003121506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/multivalued-fields.asciidoc:233 [source, python] ---- resp = client.esql.query( query="FROM mv | EVAL b + 2, a + b | LIMIT 4", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dde283eab92608e7bfbfa09c6482a12e.asciidoc000066400000000000000000000003201506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-api-keys.asciidoc:140 [source, python] ---- resp = client.security.invalidate_api_key( realm_name="native1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dde92fdf3469349ffe2c81764333543a.asciidoc000066400000000000000000000004361506101734100264450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/apis/create-index-from-source.asciidoc:137 [source, python] ---- resp = client.indices.create_from( source="my-index", dest="my-new-index", create_from={ "remove_index_blocks": False }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ddf375e4b6175d830fa4097ea0b41536.asciidoc000066400000000000000000000003161506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/delete-desired-nodes.asciidoc:61 [source, python] ---- resp = client.perform_request( "DELETE", "/_internal/desired_nodes", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ddf56782ecc7eaeb3115e150c4830013.asciidoc000066400000000000000000000007711506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:591 [source, python] ---- resp = client.update_by_query( index="my-index-000001", slice={ "id": 0, "max": 2 }, script={ "source": "ctx._source['extra'] = 'test'" }, ) print(resp) resp1 = client.update_by_query( index="my-index-000001", slice={ "id": 1, "max": 2 }, script={ "source": "ctx._source['extra'] = 'test'" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/de139866a220124360e5e27d1a736ea4.asciidoc000066400000000000000000000012001506101734100262360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:288 [source, python] ---- resp = client.search( query={ "term": { "product": "chocolate" } }, sort=[ { "offer.price": { "mode": "avg", "order": "asc", "nested": { "path": "offer", "filter": { "term": { "offer.color": "blue" } } } } } ], ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/de2f59887737de3a27716177b60393a2.asciidoc000066400000000000000000000003441506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:245 [source, python] ---- resp = client.indices.analyze( index="analyze_sample", field="obj1.field1", text="this is a test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/de876505acc75d371d1f6f484c449197.asciidoc000066400000000000000000000003621506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:257 [source, python] ---- resp = client.indices.create( index="test", settings={ "index.write.wait_for_active_shards": "2" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/de90249caeac6f1601a7e7e9f98f1bec.asciidoc000066400000000000000000000005011506101734100270060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-api-key.asciidoc:400 [source, python] ---- resp = client.security.query_api_keys( with_limited_by=True, query={ "ids": { "values": [ "VuaCfGcBCdbkQm-e5aOx" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dea22bb4997e368950f0fc80f2a5f304.asciidoc000066400000000000000000000003361506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/explicit-mapping.asciidoc:123 [source, python] ---- resp = client.indices.get_field_mapping( index="my-index-000001", fields="employee-id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dea4ac54c63a10c62eccd7b7f6543b86.asciidoc000066400000000000000000000007771506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:100 [source, python] ---- resp = client.index( index="place", id="1", document={ "suggest": { "input": [ "timmy's", "starbucks", "dunkin donuts" ], "contexts": { "place_type": [ "cafe", "food" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dead0682932ea6ec33c1197017bcb209.asciidoc000066400000000000000000000010571506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:295 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_bounding_box": { "pin.location": { "top_left": "dr5r9ydj2y73", "bottom_right": "drj7teegpus6" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dec2af498a7e5892e8fcd09ae779c8f0.asciidoc000066400000000000000000000010471506101734100267600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/iprange-aggregation.asciidoc:61 [source, python] ---- resp = client.search( index="ip_addresses", size=0, aggs={ "ip_ranges": { "ip_range": { "field": "ip", "ranges": [ { "mask": "10.0.0.0/25" }, { "mask": "10.0.0.127/25" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dee3023098d9e63aa9e113beea5686da.asciidoc000066400000000000000000000020661506101734100266440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:789 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "index1" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"knn\": {\n \"field\": \"{{knn_field}}\",\n \"query_vector\": 
{{#toJson}}query_vector{{/toJson}},\n \"k\": \"{{k}}\",\n \"num_candidates\": {{num_candidates}}\n },\n \"fields\": {{#toJson}}fields{{/toJson}}\n }\n ", "params": { "knn_field": "image-vector", "query_vector": [], "k": 10, "num_candidates": 100, "fields": [ "title", "file-type" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df04e2e9af66d5e30b1bfdbd458cab13.asciidoc000066400000000000000000000002641506101734100270470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:239 [source, python] ---- resp = client.cat.nodes( v=True, h="heap.max", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df0d27d3abd286b75aef7ddcf0e6c66c.asciidoc000066400000000000000000000017631506101734100271510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/apis/reload-analyzers.asciidoc:116 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "analysis": { "analyzer": { "my_synonyms": { "tokenizer": "whitespace", "filter": [ "synonym" ] } }, "filter": { "synonym": { "type": "synonym_graph", "synonyms_path": "analysis/synonym.txt", "updateable": True } } } } }, mappings={ "properties": { "text": { "type": "text", "analyzer": "standard", "search_analyzer": "my_synonyms" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df103a3df9b353357e72f9180ef421a1.asciidoc000066400000000000000000000005531506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/rare-terms-aggregation.asciidoc:280 [source, python] ---- resp = client.search( aggs={ "genres": { "rare_terms": { "field": "genre", "include": "swi*", "exclude": "electro*" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df1336e768fb6fc1826a5afa30a57285.asciidoc000066400000000000000000000005341506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:61 [source, python] ---- resp = client.index( index="my-data-stream", document={ "@timestamp": "2099-03-08T11:06:07.000Z", "user": { "id": "8a4f500d" }, "message": "Login successful" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df34c8ebaaa59a3ee0e3f28e2443bc30.asciidoc000066400000000000000000000026541506101734100267710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/nested-query.asciidoc:298 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "properties": { "comments": { "type": "nested" } } }, ) print(resp) resp1 = client.index( index="my-index", id="1", refresh=True, document={ "comments": [ { "author": "kimchy" } ] }, ) print(resp1) resp2 = client.index( index="my-index", id="2", refresh=True, document={ "comments": [ { "author": "kimchy" }, { "author": "nik9000" } ] }, ) print(resp2) resp3 = client.index( index="my-index", id="3", refresh=True, document={ "comments": [ { "author": "nik9000" } ] }, ) print(resp3) resp4 = client.search( index="my-index", query={ "nested": { "path": "comments", "query": { "bool": { "must_not": [ { "term": { "comments.author": "nik9000" } } ] } } } }, ) print(resp4) ---- python-elasticsearch-9.1.1/docs/examples/df7dbac966b67404b8bfa9cdda5ef480.asciidoc000066400000000000000000000002721506101734100270720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/ack-watch.asciidoc:259 [source, python] ---- resp = client.watcher.ack_watch( 
watch_id="my_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df7ed126d8c92ddd3655c59ce4f305c9.asciidoc000066400000000000000000000003571506101734100266660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/thread_pool.asciidoc:178 [source, python] ---- resp = client.cat.thread_pool( thread_pool_patterns="generic", v=True, h="id,name,active,rejected,completed", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df81b88a2192dd6f9912e0c948a44487.asciidoc000066400000000000000000000006321506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-task.asciidoc:36 [source, python] ---- resp = client.inference.put( task_type="sparse_embedding", inference_id="elser_embeddings", inference_config={ "service": "elasticsearch", "service_settings": { "num_allocations": 1, "num_threads": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/df82a9cb21a7557f3ddba2509f76f608.asciidoc000066400000000000000000000004501506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/fingerprint-tokenfilter.asciidoc:35 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "fingerprint" ], text="zebra jumps over resting resting dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfa16b7300d225e013f23625f44c087b.asciidoc000066400000000000000000000024011506101734100263110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/similarity.asciidoc:194 [source, python] ---- resp = client.indices.create( index="index", settings={ "number_of_shards": 1, "similarity": { "scripted_tfidf": { "type": "scripted", "script": { "source": "double tf = Math.sqrt(doc.freq); double idf = Math.log((field.docCount+1.0)/(term.docFreq+1.0)) + 1.0; double norm = 1/Math.sqrt(doc.length); return query.boost * tf * idf * norm;" } } } }, mappings={ "properties": { "field": { "type": "text", "similarity": "scripted_tfidf" } } }, ) print(resp) resp1 = client.index( index="index", id="1", document={ "field": "foo bar foo" }, ) print(resp1) resp2 = client.index( index="index", id="2", document={ "field": "bar baz" }, ) print(resp2) resp3 = client.indices.refresh( index="index", ) print(resp3) resp4 = client.search( index="index", explain=True, query={ "query_string": { "query": "foo^1.7", "default_field": "field" } }, ) print(resp4) ---- python-elasticsearch-9.1.1/docs/examples/dfa75000edf4b960ed9002595a051871.asciidoc000066400000000000000000000003011506101734100263210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/migrate-to-data-tiers-routing-guide.asciidoc:139 [source, python] ---- resp = client.ilm.stop() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfb20907cfc5ac520ea3b1dba5f00811.asciidoc000066400000000000000000000004401506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/example-watches/example-watch-clusterstatus.asciidoc:115 [source, python] ---- resp = client.search( index=".watcher-history*", sort=[ { "result.execution_time": "desc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfb641d2d3155669ad6fb5a424dabf4f.asciidoc000066400000000000000000000003071506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/migrate-to-data-tiers-routing-guide.asciidoc:158 [source, 
python] ---- resp = client.ilm.get_status() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfbf53781adc6640493d49931a352167.asciidoc000066400000000000000000000015151506101734100262710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/enabled.asciidoc:64 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "enabled": False }, ) print(resp) resp1 = client.index( index="my-index-000001", id="session_1", document={ "user_id": "kimchy", "session_data": { "arbitrary_object": { "some_array": [ "foo", "bar", { "baz": 2 } ] } }, "last_updated": "2015-12-06T18:20:22" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="session_1", ) print(resp2) resp3 = client.indices.get_mapping( index="my-index-000001", ) print(resp3) ---- python-elasticsearch-9.1.1/docs/examples/dfcc83efefaddccfe5dce0695c2266ef.asciidoc000066400000000000000000000004631506101734100273770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/nested-query.asciidoc:23 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "obj1": { "type": "nested" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfcdcd3ea6753dcc391a4a52cf640527.asciidoc000066400000000000000000000016551506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/update-desired-nodes.asciidoc:118 [source, python] ---- resp = client.perform_request( "PUT", "/_internal/desired_nodes/Ywkh3INLQcuPT49f6kcppA/101", headers={"Content-Type": "application/json"}, body={ "nodes": [ { "settings": { "node.name": "instance-000187", "node.external_id": "instance-000187", "node.roles": [ "data_hot", "master" ], "node.attr.data": "hot", "node.attr.logical_availability_zone": "zone-0" }, "processors_range": { "min": 8, "max": 10 }, "memory": "58gb", "storage": "2tb" } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfce1be1d035aff0b8fdf4a8839f7795.asciidoc000066400000000000000000000006441506101734100270230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/update-trained-model-deployment.asciidoc:121 [source, python] ---- resp = client.ml.update_trained_model_deployment( model_id="elastic__distilbert-base-uncased-finetuned-conll03-english", adaptive_allocations={ "enabled": True, "min_number_of_allocations": 3, "max_number_of_allocations": 10 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dfdf82b8d99436582f150117695190b3.asciidoc000066400000000000000000000011151506101734100262160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/children-aggregation.asciidoc:39 [source, python] ---- resp = client.index( index="child_example", id="1", document={ "join": { "name": "question" }, "body": "I have Windows 2003 server and i bought a new Windows 2008 server...", "title": "Whats the best way to file transfer my site from server to a newer one?", "tags": [ "windows-server-2003", "windows-server-2008", "file-transfer" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/dff61a76d5ef9ca8cbe59a416269a84b.asciidoc000066400000000000000000000002771506101734100267460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/delete-pipeline.asciidoc:34 [source, python] ---- resp = client.ingest.delete_pipeline( id="my-pipeline-id", ) print(resp) ---- 
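// Hedged companion sketch (hand-added, not autogenerated): deleting a pipeline
// that does not exist returns a 404, which the Python client raises as
// elasticsearch.NotFoundError. A cleanup script that should tolerate an absent
// pipeline can catch that exception; the pipeline id is reused from the
// delete-pipeline example above.

[source, python]
----
from elasticsearch import NotFoundError

try:
    resp = client.ingest.delete_pipeline(id="my-pipeline-id")
    print(resp)
except NotFoundError:
    # The pipeline was already gone; nothing to delete.
    print("pipeline 'my-pipeline-id' not found")
----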
python-elasticsearch-9.1.1/docs/examples/dffbbdc4025e5777c647d8818847b960.asciidoc000066400000000000000000000003271506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-api-keys.asciidoc:275 [source, python] ---- resp = client.security.get_api_key( id="VuaCfGcBCdbkQm-e5aOx", owner=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e017c2de6f93a8dd97f5c6e002dd5c4f.asciidoc000066400000000000000000000013471506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/post-calendar-event.asciidoc:132 [source, python] ---- resp = client.ml.post_calendar_events( calendar_id="dst-germany", events=[ { "description": "Fall 2024", "start_time": 1729994400000, "end_time": 1730167200000, "skip_result": False, "skip_model_update": False, "force_time_shift": -3600 }, { "description": "Spring 2025", "start_time": 1743296400000, "end_time": 1743469200000, "skip_result": False, "skip_model_update": False, "force_time_shift": 3600 } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e04267ffc50d916800b919c6cdc9622a.asciidoc000066400000000000000000000003731506101734100264250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/ignore-above.asciidoc:74 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.mapping.ignore_above": 256 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e0734215054e1ff5df712ce3a826cdba.asciidoc000066400000000000000000000004261506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:604 [source, python] ---- resp = client.indices.delete( index="my-index", ) print(resp) resp1 = client.indices.delete_data_stream( name="logs-my_app-default", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e08fb1435dc659c24badf25b676efb68.asciidoc000066400000000000000000000005431506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/index-prefixes.asciidoc:21 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "body_text": { "type": "text", "index_prefixes": {} } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e095fc96504efecc588f97673912e3d3.asciidoc000066400000000000000000000024431506101734100264610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/put-job.asciidoc:420 [source, python] ---- resp = client.ml.put_job( job_id="test-job1", pretty=True, analysis_config={ "bucket_span": "15m", "detectors": [ { "detector_description": "Sum of bytes", "function": "sum", "field_name": "bytes" } ] }, data_description={ "time_field": "timestamp", "time_format": "epoch_ms" }, analysis_limits={ "model_memory_limit": "11MB" }, model_plot_config={ "enabled": True, "annotations_enabled": True }, results_index_name="test-job1", datafeed_config={ "indices": [ "kibana_sample_data_logs" ], "query": { "bool": { "must": [ { "match_all": {} } ] } }, "runtime_mappings": { "hour_of_day": { "type": "long", "script": { "source": "emit(doc['timestamp'].value.getHour());" } } }, "datafeed_id": "datafeed-test-job1" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e09d30195108bd6a1f6857394a6123ea.asciidoc000066400000000000000000000004111506101734100262500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
analysis/tokenfilters/reverse-tokenfilter.asciidoc:24 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "reverse" ], text="quick fox jumps", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e09ee13ce253c7892dd5ef076fbfbba5.asciidoc000066400000000000000000000011101506101734100267770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/remove-duplicates-tokenfilter.asciidoc:136 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_custom_analyzer": { "tokenizer": "standard", "filter": [ "keyword_repeat", "stemmer", "remove_duplicates" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e0a7c730ef0f22e3edffe9a254bc56e7.asciidoc000066400000000000000000000010411506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:240 [source, python] ---- resp = client.reindex( source={ "index": "my-index-000001", "slice": { "id": 0, "max": 2 } }, dest={ "index": "my-new-index-000001" }, ) print(resp) resp1 = client.reindex( source={ "index": "my-index-000001", "slice": { "id": 1, "max": 2 } }, dest={ "index": "my-new-index-000001" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e0b2f56c34e33ff52f8f9658be2f7ca1.asciidoc000066400000000000000000000002531506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/stats.asciidoc:111 [source, python] ---- resp = client.indices.stats( index="index1,index2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e0bbfb368eae307e9508ab8d6e9cf23c.asciidoc000066400000000000000000000002571506101734100270100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/fielddata.asciidoc:108 [source, python] ---- resp = client.cat.fielddata( v=True, fields="body", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e0d4a800de2d8f4062e69433586c38db.asciidoc000066400000000000000000000006221506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/saml-complete-logout-api.asciidoc:75 [source, python] ---- resp = client.security.saml_complete_logout( realm="saml1", ids=[ "_1c368075e0b3..." 
], query_string="SAMLResponse=fZHLasMwEEVbfb1bf...&SigAlg=http%3A%2F%2Fwww.w3.org%2F2000%2F09%2Fxmldsig%23rsa-sha1&Signature=CuCmFn%2BLqnaZGZJqK...", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e0fcef99656799de6b88117d56f131e2.asciidoc000066400000000000000000000004451506101734100264610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:276 [source, python] ---- resp = client.explain( index="my-index-000001", id="0", query={ "match": { "message": "elasticsearch" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1220f2c28db6ef0233e26e6bd3866fa.asciidoc000066400000000000000000000024211506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/tophits-aggregation.asciidoc:427 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "top_tags": { "terms": { "field": "type", "size": 3 }, "aggs": { "top_sales_hits": { "top_hits": { "sort": [ { "date": { "order": "desc" } } ], "_source": { "includes": [ "date", "price" ] }, "size": 1 } }, "having.top_salary": { "bucket_selector": { "buckets_path": { "tp": "top_sales_hits[_source.price]" }, "script": "params.tp < 180" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e12f2d2ddca387630e7855a6db952da2.asciidoc000066400000000000000000000017031506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/range-aggregation.asciidoc:180 [source, python] ---- resp = client.search( index="sales", runtime_mappings={ "price.euros": { "type": "double", "script": { "source": "\n emit(doc['price'].value * params.conversion_rate)\n ", "params": { "conversion_rate": 0.835526591 } } } }, aggs={ "price_ranges": { "range": { "field": "price.euros", "ranges": [ { "to": 100 }, { "from": 100, "to": 200 }, { "from": 200 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1337c6b76defd5a46d05220f9d9c9fc.asciidoc000066400000000000000000000004061506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-tokens.asciidoc:134 [source, python] ---- resp = client.security.get_token( grant_type="password", username="test_admin", password="x-pack-test-password", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e14a5a5a1c880031486bfff43031fa3a.asciidoc000066400000000000000000000003121506101734100264450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/circuit-breaker-errors.asciidoc:71 [source, python] ---- resp = client.nodes.stats( metric="breaker", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e16a353e619b935c5c70769b1b9fa100.asciidoc000066400000000000000000000011251506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/composite-aggregation.asciidoc:458 [source, python] ---- resp = client.search( size=0, aggs={ "my_buckets": { "composite": { "sources": [ { "tile": { "geotile_grid": { "field": "location", "precision": 8 } } } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1874cc7cd22b6860ca8b11bde3c70c1.asciidoc000066400000000000000000000010211506101734100266130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:227 [source, python] ---- resp = client.search( index="index2", query={ "query_string": { "query": "running with 
scissors", "fields": [ "comment", "comment.english" ] } }, highlight={ "order": "score", "fields": { "comment": { "type": "fvh" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e194e9cbe3eb2305f4f7cdda0cf529bd.asciidoc000066400000000000000000000007051506101734100270610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/misc.asciidoc:10 [source, python] ---- resp = client.search( typed_keys=True, suggest={ "text": "some test mssage", "my-first-suggester": { "term": { "field": "message" } }, "my-second-suggester": { "phrase": { "field": "message" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e19f5e3724d9f3f36a817b9a811ca42e.asciidoc000066400000000000000000000012271506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline.asciidoc:62 [source, python] ---- resp = client.search( aggs={ "my_date_histo": { "date_histogram": { "field": "timestamp", "calendar_interval": "day" }, "aggs": { "the_sum": { "sum": { "field": "lemmings" } }, "the_deriv": { "derivative": { "buckets_path": "the_sum" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1c08f5774e81da31cd75aa1bdc2c548.asciidoc000066400000000000000000000015251506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/percolate-query.asciidoc:688 [source, python] ---- resp = client.search( index="my-index-000001", query={ "bool": { "should": [ { "percolate": { "field": "query", "document": { "message": "bonsai tree" }, "name": "query1" } }, { "percolate": { "field": "query", "document": { "message": "tulip flower" }, "name": "query2" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1d6ecab4148b09f4c605474157e7dbd.asciidoc000066400000000000000000000003171506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:305 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1f20ee96ce80edcc35b647cef731e15.asciidoc000066400000000000000000000004551506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/match-enrich-policy-type-ex.asciidoc:101 [source, python] ---- resp = client.index( index="my-index-000001", id="my_id", pipeline="user_lookup", document={ "email": "mardy.brown@asciidocsmith.com" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e1f6ea7c0937cf7e6ea7e8209e52e8bb.asciidoc000066400000000000000000000004321506101734100267370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/index-sorting.asciidoc:158 [source, python] ---- resp = client.search( index="events", size=10, sort=[ { "timestamp": "desc" } ], track_total_hits=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e22a1da3c622611be6855e534c0709ae.asciidoc000066400000000000000000000004041506101734100263750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-rules/apis/test-query-ruleset.asciidoc:117 [source, python] ---- resp = client.query_rules.test( ruleset_id="my-ruleset", match_criteria={ "query_string": "puggles" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e26c96978096ccc592849cca9db67ffc.asciidoc000066400000000000000000000003731506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
shard-request-cache.asciidoc:74 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index.requests.cache.enable": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e26e8bfa68aa4ab265b22304c38c3aef.asciidoc000066400000000000000000000042131506101734100267020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/esql/esql-getting-started-sample-data.asciidoc:7 [source, python] ---- resp = client.indices.create( index="sample_data", mappings={ "properties": { "client_ip": { "type": "ip" }, "message": { "type": "keyword" } } }, ) print(resp) resp1 = client.bulk( index="sample_data", operations=[ { "index": {} }, { "@timestamp": "2023-10-23T12:15:03.360Z", "client_ip": "172.21.2.162", "message": "Connected to 10.1.0.3", "event_duration": 3450233 }, { "index": {} }, { "@timestamp": "2023-10-23T12:27:28.948Z", "client_ip": "172.21.2.113", "message": "Connected to 10.1.0.2", "event_duration": 2764889 }, { "index": {} }, { "@timestamp": "2023-10-23T13:33:34.937Z", "client_ip": "172.21.0.5", "message": "Disconnected", "event_duration": 1232382 }, { "index": {} }, { "@timestamp": "2023-10-23T13:51:54.732Z", "client_ip": "172.21.3.15", "message": "Connection error", "event_duration": 725448 }, { "index": {} }, { "@timestamp": "2023-10-23T13:52:55.015Z", "client_ip": "172.21.3.15", "message": "Connection error", "event_duration": 8268153 }, { "index": {} }, { "@timestamp": "2023-10-23T13:53:55.832Z", "client_ip": "172.21.3.15", "message": "Connection error", "event_duration": 5033755 }, { "index": {} }, { "@timestamp": "2023-10-23T13:55:01.543Z", "client_ip": "172.21.3.15", "message": "Connected to 10.1.0.1", "event_duration": 1756467 } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e270f3f721a5712cd11a5ca03554f5b0.asciidoc000066400000000000000000000006221506101734100263660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:171 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "Will Smith", "type": "best_fields", "fields": [ "first_name", "last_name" ], "operator": "and" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e273060a675c959fd5f3cde27c8aff07.asciidoc000066400000000000000000000005041506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/disk-usage.asciidoc:14 [source, python] ---- resp = client.indices.create( index="index", mappings={ "properties": { "foo": { "type": "integer", "index": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2750d69bcb6d4c7e16e704cd0fb3530.asciidoc000066400000000000000000000010111506101734100265420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rank-feature-query.asciidoc:67 [source, python] ---- resp = client.indices.create( index="test", mappings={ "properties": { "pagerank": { "type": "rank_feature" }, "url_length": { "type": "rank_feature", "positive_score_impact": False }, "topics": { "type": "rank_features" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2883c88b5ceca9fce1e70e716d80025.asciidoc000066400000000000000000000004711506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/version.asciidoc:19 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_version": { "type": "version" } } }, ) print(resp) ---- 
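// Hedged follow-up sketch (hand-added, not autogenerated): a document that the
// version-typed mapping above would accept. The value "1.2.3" is an
// illustrative semantic-version string chosen for this sketch, not taken from
// the original docs.

[source, python]
----
resp = client.index(
    index="my-index-000001",
    id="1",
    document={
        "my_version": "1.2.3"
    },
)
print(resp)
----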
python-elasticsearch-9.1.1/docs/examples/e2a22c6fd58cc0becf4c383134a08f8b.asciidoc000066400000000000000000000010721506101734100267060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/intervals-query.asciidoc:455 [source, python] ---- resp = client.search( query={ "intervals": { "my_text": { "match": { "query": "salty", "filter": { "contained_by": { "match": { "query": "hot porridge" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2a753029b450942a3228e3003a55a7d.asciidoc000066400000000000000000000006431506101734100261570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/apis/put-lifecycle.asciidoc:111 [source, python] ---- resp = client.indices.put_data_lifecycle( name="my-weather-sensor-data-stream", downsampling=[ { "after": "1d", "fixed_interval": "10m" }, { "after": "7d", "fixed_interval": "1d" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2a7d127b82ddebb690a959dcd0cbc09.asciidoc000066400000000000000000000007501506101734100267750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/elision-tokenfilter.asciidoc:96 [source, python] ---- resp = client.indices.create( index="elision_example", settings={ "analysis": { "analyzer": { "whitespace_elision": { "tokenizer": "whitespace", "filter": [ "elision" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2b4867a9f72bda87ebaa3608d3fba4c.asciidoc000066400000000000000000000012771506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:354 [source, python] ---- resp = client.render_search_template( source={ "query": { "range": { "user.effective.date": { "gte": "{{date.min}}", "lte": "{{date.max}}", "format": "{{#join delimiter='||'}}date.formats{{/join delimiter='||'}}" } } } }, params={ "date": { "min": "2098", "max": "06/05/2099", "formats": [ "dd/MM/yyyy", "yyyy" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2bcc8f4ed2b4de82729e7a5a7c8f634.asciidoc000066400000000000000000000002561506101734100267400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // synonyms/apis/list-synonyms-sets.asciidoc:86 [source, python] ---- resp = client.synonyms.get_synonyms_sets() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2d8cf24a12053eb09fec7087cdab43a.asciidoc000066400000000000000000000014601506101734100267030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/normalize-aggregation.asciidoc:95 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } }, "percent_of_total_sales": { "normalize": { "buckets_path": "sales", "method": "percent_of_sum", "format": "00.00%" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e2ec9e867f7141b304b53ebc59098f2a.asciidoc000066400000000000000000000003141506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/update-api-key.asciidoc:258 [source, python] ---- resp = client.security.update_api_key( id="VuaCfGcBCdbkQm-e5aOx", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e3019fd5f23458ae49ad9854c97d321c.asciidoc000066400000000000000000000003351506101734100264360ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // rest-api/security/oidc-prepare-authentication-api.asciidoc:78 [source, python] ---- resp = client.security.oidc_prepare_authentication( realm="oidc1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e308899a306e61d1a590868308689955.asciidoc000066400000000000000000000012611506101734100260150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/ip-location.asciidoc:136 [source, python] ---- resp = client.ingest.put_pipeline( id="ip_location", description="Add ip geolocation info", processors=[ { "ip_location": { "field": "ip", "target_field": "geo", "database_file": "GeoLite2-Country.mmdb" } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="ip_location", document={ "ip": "89.160.20.128" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/e30ea6e3823a139d7693d8cce1920a06.asciidoc000066400000000000000000000005201506101734100264110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/multi-match-query.asciidoc:50 [source, python] ---- resp = client.search( query={ "multi_match": { "query": "this is a test", "fields": [ "subject^3", "message" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e316271f668c9889bf548311fb421f1e.asciidoc000066400000000000000000000005601506101734100263000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:846 [source, python] ---- resp = client.search( aggs={ "ip_addresses": { "terms": { "field": "destination_ip", "missing": "0.0.0.0", "value_type": "ip" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e317a8380dfbc76c4e7f23d0997b3518.asciidoc000066400000000000000000000003641506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:524 [source, python] ---- resp = client.cluster.put_settings( persistent={ "action.destructive_requires_name": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e324ea1547635180c31c1adf77870ba2.asciidoc000066400000000000000000000021201506101734100263150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/tsds-reindex.asciidoc:249 [source, python] ---- resp = client.cluster.put_component_template( name="destination_template", template={ "settings": { "index": { "number_of_replicas": 2, "number_of_shards": 2, "mode": "time_series", "routing_path": [ "metricset" ] } }, "mappings": { "properties": { "@timestamp": { "type": "date" }, "metricset": { "type": "keyword", "time_series_dimension": True }, "k8s": { "properties": { "tx": { "type": "long" }, "rx": { "type": "long" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e35abc9403e4aef7d538ab29ccc363b3.asciidoc000066400000000000000000000004171506101734100267130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/prevalidate-node-removal.asciidoc:111 [source, python] ---- resp = client.perform_request( "POST", "/_internal/prevalidate_node_removal", params={ "names": "node1,node2" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e3678142aec988e2ff0ae5d934dc39e9.asciidoc000066400000000000000000000037611506101734100266160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-point.asciidoc:28 [source, python] ---- resp = 
client.indices.create( index="my-index-000001", mappings={ "properties": { "location": { "type": "geo_point" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "text": "Geopoint as an object using GeoJSON format", "location": { "type": "Point", "coordinates": [ -71.34, 41.12 ] } }, ) print(resp1) resp2 = client.index( index="my-index-000001", id="2", document={ "text": "Geopoint as a WKT POINT primitive", "location": "POINT (-71.34 41.12)" }, ) print(resp2) resp3 = client.index( index="my-index-000001", id="3", document={ "text": "Geopoint as an object with 'lat' and 'lon' keys", "location": { "lat": 41.12, "lon": -71.34 } }, ) print(resp3) resp4 = client.index( index="my-index-000001", id="4", document={ "text": "Geopoint as an array", "location": [ -71.34, 41.12 ] }, ) print(resp4) resp5 = client.index( index="my-index-000001", id="5", document={ "text": "Geopoint as a string", "location": "41.12,-71.34" }, ) print(resp5) resp6 = client.index( index="my-index-000001", id="6", document={ "text": "Geopoint as a geohash", "location": "drm3btev3e86" }, ) print(resp6) resp7 = client.search( index="my-index-000001", query={ "geo_bounding_box": { "location": { "top_left": { "lat": 42, "lon": -72 }, "bottom_right": { "lat": 40, "lon": -74 } } } }, ) print(resp7) ---- python-elasticsearch-9.1.1/docs/examples/e375c7da666276c4df6664c6821cd5f4.asciidoc000066400000000000000000000012051506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/rank-vectors.asciidoc:17 [source, python] ---- resp = client.indices.create( index="my-rank-vectors-float", mappings={ "properties": { "my_vector": { "type": "rank_vectors" } } }, ) print(resp) resp1 = client.index( index="my-rank-vectors-float", id="1", document={ "my_vector": [ [ 0.5, 10, 6 ], [ -0.5, 10, 10 ] ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e3a6462ca79c101314da0680c97678cd.asciidoc000066400000000000000000000012021506101734100263270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrieve-selected-fields.asciidoc:734 [source, python] ---- resp = client.search( query={ "match_all": {} }, script_fields={ "test1": { "script": { "lang": "painless", "source": "doc['price'].value * 2" } }, "test2": { "script": { "lang": "painless", "source": "doc['price'].value * params.factor", "params": { "factor": 2 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e3b3a8ae12ab947ad3ba96eb228402ca.asciidoc000066400000000000000000000004321506101734100266720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/store.asciidoc:122 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.store.preload": [ "nvd", "dvd" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e3f2f6ee3e312b8a90634827ae954d70.asciidoc000066400000000000000000000015351506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:421 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "GeometryCollection", "geometries": [ { "type": "Point", "coordinates": [ 100, 0 ] }, { "type": "LineString", "coordinates": [ [ 101, 0 ], [ 102, 1 ] ] } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e3fe842951dc873d7d00c8f6a010c53f.asciidoc000066400000000000000000000003731506101734100265030ustar00rootroot00000000000000// This file is 
autogenerated, DO NOT EDIT // troubleshooting/common-issues/task-queue-backlog.asciidoc:90 [source, python] ---- resp = client.tasks.list( human=True, detailed=True, actions="indices:data/write/search", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4193867485595c9c92f909a052d2a90.asciidoc000066400000000000000000000007341506101734100261530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/has-parent-query.asciidoc:27 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my-join-field": { "type": "join", "relations": { "parent": "child" } }, "tag": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e41a9bac42d0c1cb103674ae9039b7af.asciidoc000066400000000000000000000006201506101734100266110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/field-mapping.asciidoc:234 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "numeric_detection": True }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "my_float": "1.0", "my_integer": "1" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e441cb3be3c2f007621ee1f8c9a2e0ef.asciidoc000066400000000000000000000005621506101734100267040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/matrix-stats-aggregation.asciidoc:45 [source, python] ---- resp = client.search( aggs={ "statistics": { "matrix_stats": { "fields": [ "poverty", "income" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e451900efbd8be50c2b8347a83816aa6.asciidoc000066400000000000000000000013151506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc:44 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "stats_monthly_sales": { "extended_stats_bucket": { "buckets_path": "sales_per_month>sales" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e46c83db1580e14be844079cd008f518.asciidoc000066400000000000000000000005011506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/enable-index-allocation.asciidoc:130 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "routing.allocation.enable": "all" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e47a71a2e314dbbee5db8142a23957ce.asciidoc000066400000000000000000000006431506101734100266310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:621 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "set": { "description": "Index the ingest timestamp as 'event.ingested'", "field": "event.ingested", "value": "{{{_ingest.timestamp}}}" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e48e7da65c2b32d724fd7e3bfa175c6f.asciidoc000066400000000000000000000003761506101734100267340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-overall-buckets.asciidoc:136 [source, python] ---- resp = client.ml.get_overall_buckets( job_id="job-*", overall_score=80, start="1403532000000", ) print(resp) 
---- python-elasticsearch-9.1.1/docs/examples/e494162e83ce041c56b2e2bc29d33474.asciidoc000066400000000000000000000006271506101734100263410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:394 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n sequence by process.pid with maxspan=1h\n [ process where process.name == \"regsvr32.exe\" ]\n [ file where stringContains(file.name, \"scrobj.dll\") ]\n until [ process where event.type == \"termination\" ]\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4b2b5e0aaedf3cbbcde3d61eb1f13fc.asciidoc000066400000000000000000000003521506101734100273270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/refresh.asciidoc:108 [source, python] ---- resp = client.index( index="test", id="4", refresh="wait_for", document={ "test": "test" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4b38973c74037335378d8480f1ce894.asciidoc000066400000000000000000000017031506101734100261510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/apis/simulate-ingest.asciidoc:435 [source, python] ---- resp = client.simulate.ingest( docs=[ { "_index": "my-index", "_id": "123", "_source": { "foo": "foo" } }, { "_index": "my-index", "_id": "456", "_source": { "bar": "rab" } } ], component_template_substitutions={ "my-mappings_template": { "template": { "mappings": { "dynamic": "strict", "properties": { "foo": { "type": "keyword" }, "bar": { "type": "keyword" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4b64b8277af259a52c8d3940157b5fa.asciidoc000066400000000000000000000023461506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/painless-examples.asciidoc:402 [source, python] ---- resp = client.transform.put_transform( transform_id="data_log", source={ "index": "kibana_sample_data_logs" }, dest={ "index": "data-logs-by-client" }, pivot={ "group_by": { "machine.os": { "terms": { "field": "machine.os.keyword" } }, "machine.ip": { "terms": { "field": "clientip" } } }, "aggregations": { "time_frame.lte": { "max": { "field": "timestamp" } }, "time_frame.gte": { "min": { "field": "timestamp" } }, "time_length": { "bucket_script": { "buckets_path": { "min": "time_frame.gte.value", "max": "time_frame.lte.value" }, "script": "params.max - params.min" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4b6a6a921c97b4c0bbe97bd89f4cf33.asciidoc000066400000000000000000000003171506101734100267310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/promote-data-stream-api.asciidoc:32 [source, python] ---- resp = client.indices.promote_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4be53736bcc02b03068fd72fdbfe271.asciidoc000066400000000000000000000004061506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:114 [source, python] ---- resp = client.indices.put_mapping( index="publications", properties={ "title": { "type": "text" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4d1f01c025fb797a1d87f372760eabf.asciidoc000066400000000000000000000003141506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/hotspotting.asciidoc:271 [source, python] ---- resp = client.tasks.list( human=True, detailed=True, ) print(resp) ---- 
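// Hedged companion sketch (hand-added, not autogenerated): the task listing
// from the hotspotting example above can also be grouped by parent task, which
// makes fan-out from a single parent request easier to trace;
// group_by="parents" is a documented option of the tasks API.

[source, python]
----
resp = client.tasks.list(
    human=True,
    detailed=True,
    group_by="parents",
)
print(resp)
----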
python-elasticsearch-9.1.1/docs/examples/e4de6035653e8202c43631f02d244661.asciidoc000066400000000000000000000006151506101734100261060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-across-clusters.asciidoc:127 [source, python] ---- resp = client.search( index="cluster_one:my-index-000001", size=1, query={ "match": { "user.id": "kimchy" } }, source=[ "user.id", "message", "http.response.status_code" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e4ea514eb9a01716d9bbc5aa04ee0252.asciidoc000066400000000000000000000005631506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/query-user.asciidoc:192 [source, python] ---- resp = client.perform_request( "POST", "/_security/_query/user", headers={"Content-Type": "application/json"}, body={ "query": { "prefix": { "roles": "other" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e51a86b666f447cda5f634547a8e1a4a.asciidoc000066400000000000000000000003041506101734100265030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-data-stream.asciidoc:28 [source, python] ---- resp = client.indices.create_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e551ea38a2d8f8deac110b33304200cc.asciidoc000066400000000000000000000011131506101734100265160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // reranking/learning-to-rank-search-usage.asciidoc:17 [source, python] ---- resp = client.search( index="my-index", query={ "multi_match": { "fields": [ "title", "content" ], "query": "the quick brown fox" } }, rescore={ "learning_to_rank": { "model_id": "ltr-model", "params": { "query_text": "the quick brown fox" } }, "window_size": 100 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e586d1d2a997133e039fd352a42a72b3.asciidoc000066400000000000000000000007321506101734100263370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/terms-set-query.asciidoc:136 [source, python] ---- resp = client.search( index="job-candidates", query={ "terms_set": { "programming_languages": { "terms": [ "c++", "java", "php" ], "minimum_should_match_field": "required_matches" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e58833449d01379df20ad06dc28144d8.asciidoc000066400000000000000000000004361506101734100262670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update-by-query.asciidoc:331 [source, python] ---- resp = client.update_by_query( index="my-index-000001", conflicts="proceed", query={ "term": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e58b7965c3a314c34bc444c6db3b1b79.asciidoc000066400000000000000000000004431506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/enable-index-allocation.asciidoc:104 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", name="index.routing.allocation.enable", flat_settings=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e5901f48eb8a419b878fc2cb815d8691.asciidoc000066400000000000000000000003551506101734100264500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/update-settings.asciidoc:50 [source, python] ---- resp = client.cluster.put_settings( persistent={ "indices.recovery.max_bytes_per_sec": "50mb" }, ) print(resp) 
---- python-elasticsearch-9.1.1/docs/examples/e5c710b08a545522d50b4ce35503bc46.asciidoc000066400000000000000000000013251506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:230 [source, python] ---- resp = client.index( index="my-data-stream", pipeline="my-pipeline", document={ "@timestamp": "2099-03-07T11:04:05.000Z", "my-keyword-field": "foo" }, ) print(resp) resp1 = client.bulk( index="my-data-stream", pipeline="my-pipeline", operations=[ { "create": {} }, { "@timestamp": "2099-03-07T11:04:06.000Z", "my-keyword-field": "foo" }, { "create": {} }, { "@timestamp": "2099-03-07T11:04:07.000Z", "my-keyword-field": "bar" } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e5f50b31f165462d883ecbff45f74985.asciidoc000066400000000000000000000011551506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-index-template-v1.asciidoc:20 [source, python] ---- resp = client.indices.put_template( name="template_1", index_patterns=[ "te*", "bar*" ], settings={ "number_of_shards": 1 }, mappings={ "_source": { "enabled": False }, "properties": { "host_name": { "type": "keyword" }, "created_at": { "type": "date", "format": "EEE MMM dd HH:mm:ss Z yyyy" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e5f89a04f50df707a0a53ec0f2eecbbd.asciidoc000066400000000000000000000003451506101734100270500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:77 [source, python] ---- resp = client.get( index="my-index-000001", id="0", source_includes="*.id", source_excludes="entities", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e5f8f83df37ab2296dc4bfed95d7aba7.asciidoc000066400000000000000000000004171506101734100271030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/enable-cluster-allocation.asciidoc:112 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.enable": "all" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e608cd0c034f6c245ea87f425e09ce2f.asciidoc000066400000000000000000000003501506101734100265560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-term-query.asciidoc:10 [source, python] ---- resp = client.search( query={ "span_term": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e60b7f75ca806f2c74927c3d9409a986.asciidoc000066400000000000000000000005251506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/create-role-mappings.asciidoc:166 [source, python] ---- resp = client.security.put_role_mapping( name="mapping3", roles=[ "ldap-user" ], enabled=True, rules={ "field": { "realm.name": "ldap1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e60c2bf89fdf38187709d04dd1c55330.asciidoc000066400000000000000000000006171506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/mlt-query.asciidoc:19 [source, python] ---- resp = client.search( query={ "more_like_this": { "fields": [ "title", "description" ], "like": "Once upon a time", "min_term_freq": 1, "max_query_terms": 12 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e60ded7becfd5b2ccaef5bad2aaa93f5.asciidoc000066400000000000000000000005511506101734100274250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
aggregations/bucket/terms-aggregation.asciidoc:185 [source, python] ---- resp = client.search( aggs={ "products": { "terms": { "field": "product", "size": 5, "show_term_doc_count_error": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e61b5abe85000cc954a42e2cd74f3a26.asciidoc000066400000000000000000000003151506101734100265370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/put-calendar.asciidoc:50 [source, python] ---- resp = client.ml.put_calendar( calendar_id="planned-outages", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6369e7cef82d881af593d5526bf79bd.asciidoc000066400000000000000000000004551506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-term-query.asciidoc:22 [source, python] ---- resp = client.search( query={ "span_term": { "user.id": { "value": "kimchy", "boost": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e63775a2ff22b945ab9d5f630b80c506.asciidoc000066400000000000000000000003031506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/health.asciidoc:202 [source, python] ---- resp = client.cluster.health( index="my-index-000001", level="shards", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e63cf08350e9381f519c2835843be7cd.asciidoc000066400000000000000000000006531506101734100263610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/field-mapping.asciidoc:175 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_date_formats": [ "yyyy/MM||MM/dd/yyyy" ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "create_date": "09/25/2015" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/e642be44a62a89cf4afb2db28220c9a9.asciidoc000066400000000000000000000004541506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:459 [source, python] ---- resp = client.cluster.put_settings( persistent={ "ingest.geoip.downloader.enabled": True, "indices.lifecycle.history_index_enabled": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e650d73c57ab313e686fec01e3b0c90f.asciidoc000066400000000000000000000006531506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:915 [source, python] ---- resp = client.reindex( source={ "index": "my-index-000001" }, dest={ "index": "my-new-index-000001", "version_type": "external" }, script={ "source": "if (ctx._source.foo == 'bar') {ctx._version++; ctx._source.remove('foo')}", "lang": "painless" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e697ef947f3fb7835f7fadb9125b1043.asciidoc000066400000000000000000000006031506101734100265240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:375 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT * FROM library ORDER BY page_count DESC", filter={ "range": { "page_count": { "gte": 100, "lte": 200 } } }, fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6b972611c0ec8ab4c240f33f323d85b.asciidoc000066400000000000000000000006451506101734100264720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:418 [source, python] ---- resp = 
client.search( index="my-index-000001", size="0", aggs={ "by_day": { "date_histogram": { "field": "date", "calendar_interval": "day", "time_zone": "-01:00" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6ccd979c34ba03007e625c6ec3e71a9.asciidoc000066400000000000000000000002131506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:260 [source, python] ---- resp = client.indices.get_alias() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6dcc2911d2416a65eaec9846b956e15.asciidoc000066400000000000000000000002601506101734100265050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/refresh.asciidoc:19 [source, python] ---- resp = client.indices.refresh( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6e47da87079a8b67f767a2a01878cf2.asciidoc000066400000000000000000000007161506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:578 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "set": { "description": "Use geo_point dynamic template for address field", "field": "_dynamic_templates", "value": { "address": "geo_point" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6f6d3aeea7ecea47cfd5c3d727f7004.asciidoc000066400000000000000000000024561506101734100270740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:448 [source, python] ---- resp = client.search( index="retrievers_example", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "query_string": { "query": "(information retrieval) OR (artificial intelligence)", "default_field": "text" } } } }, { "knn": { "field": "vector", "query_vector": [ 0.23, 0.67, 0.89 ], "k": 3, "num_candidates": 5 } } ], "rank_window_size": 10, "rank_constant": 1 } }, collapse={ "field": "year", "inner_hits": { "name": "topic related documents", "_source": [ "year" ] } }, source=False, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e6faae2e272ee57727f38e55a3de5bb2.asciidoc000066400000000000000000000005051506101734100267260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:557 [source, python] ---- resp = client.search( highlight={ "fields": [ { "title": {} }, { "text": {} } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e715fb8c792bf09ac98f0ceca99beb84.asciidoc000066400000000000000000000002761506101734100270310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:345 [source, python] ---- resp = client.migration.deprecations( index=".ml-anomalies-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e71d300cd87f09a9527cf45395dd7eb1.asciidoc000066400000000000000000000002471506101734100265140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-execute-retention.asciidoc:40 [source, python] ---- resp = client.slm.execute_retention() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e77c2f41a7eca765b0c5f734a66d919f.asciidoc000066400000000000000000000007671506101734100266060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/attachment.asciidoc:133 [source, python] ---- resp = client.ingest.put_pipeline( id="attachment", description="Extract attachment information", processors=[ { 
"attachment": { "field": "data", "properties": [ "content", "title" ], "remove_binary": True } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e784fc00894635470adfd78a0c46b427.asciidoc000066400000000000000000000012351506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-component-template.asciidoc:19 [source, python] ---- resp = client.cluster.put_component_template( name="template_1", template={ "settings": { "number_of_shards": 1 }, "mappings": { "_source": { "enabled": False }, "properties": { "host_name": { "type": "keyword" }, "created_at": { "type": "date", "format": "EEE MMM dd HH:mm:ss Z yyyy" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e7cfe670b4177d1011076f845ec2916c.asciidoc000066400000000000000000000005161506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-manage-data-stream-retention.asciidoc:144 [source, python] ---- resp = client.cluster.put_settings( persistent={ "data_streams.lifecycle.retention.default": "7d", "data_streams.lifecycle.retention.max": "90d" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e7d819634d765cde269e2669e2dc677f.asciidoc000066400000000000000000000003151506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-api-keys.asciidoc:151 [source, python] ---- resp = client.security.invalidate_api_key( username="myuser", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e7e95022867c72a6563137f066dd2973.asciidoc000066400000000000000000000010411506101734100261350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:207 [source, python] ---- resp = client.search( aggs={ "hotspots": { "geohash_grid": { "field": "location", "precision": 5 }, "aggs": { "significant_crime_types": { "significant_terms": { "field": "crime_type" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e7eca57a5bf5a53cbbe2463bce11495b.asciidoc000066400000000000000000000005071506101734100267700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/valuecount-aggregation.asciidoc:15 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "types_count": { "value_count": { "field": "type" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8211247c280a3fbbbdd32850b743b7b.asciidoc000066400000000000000000000006751506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/put-dfanalytics.asciidoc:723 [source, python] ---- resp = client.ml.put_data_frame_analytics( id="house_price_regression_analysis", source={ "index": "houses_sold_last_10_yrs" }, dest={ "index": "house_price_predictions" }, analysis={ "regression": { "dependent_variable": "price" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e821d27a8b810821707ba860e31f8b78.asciidoc000066400000000000000000000006001506101734100262550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/put-mapping.asciidoc:238 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", properties={ "city": { "type": "text", "fields": { "raw": { "type": "keyword" } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/e827a9040e137410d62d10bb3b3cbb71.asciidoc000066400000000000000000000002631506101734100263640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/get-watch.asciidoc:55 [source, python] ---- resp = client.watcher.get_watch( id="my_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e82c33def91faddcfeed7b02cd258605.asciidoc000066400000000000000000000010571506101734100270640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/multi-terms-aggregation.asciidoc:248 [source, python] ---- resp = client.search( index="products", aggs={ "genres_and_products": { "multi_terms": { "terms": [ { "field": "genre" }, { "field": "product", "missing": "Product Z" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e84e23232c7ecc8d6377ec2c16a60269.asciidoc000066400000000000000000000006211506101734100264230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:198 [source, python] ---- resp = client.indices.create( index="test", aliases={ "alias_1": {}, "alias_2": { "filter": { "term": { "user.id": "kimchy" } }, "routing": "shard-1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e88a057a13e191e4d5faa22edf2ae8ed.asciidoc000066400000000000000000000003761506101734100270010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:368 [source, python] ---- resp = client.cluster.get_settings( filter_path="**.xpack.profiling.templates.enabled", include_defaults=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e891e1d4805172da45a81f62b6b44aca.asciidoc000066400000000000000000000011711506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:464 [source, python] ---- resp = client.search( size=0, runtime_mappings={ "normalized_genre": { "type": "keyword", "script": "\n String genre = doc['genre'].value;\n if (doc['product'].value.startsWith('Anthology')) {\n emit(genre + ' anthology');\n } else {\n emit(genre);\n }\n " } }, aggs={ "genres": { "terms": { "field": "normalized_genre" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e89bf0d893b7bf43c2d9b44db6cfe21b.asciidoc000066400000000000000000000005111506101734100270040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rank-feature-query.asciidoc:295 [source, python] ---- resp = client.search( index="test", query={ "rank_feature": { "field": "pagerank", "log": { "scaling_factor": 4 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8a2726eea5545355d1d0835d4599f55.asciidoc000066400000000000000000000003771506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/ip.asciidoc:126 [source, python] ---- resp = client.search( index="my-index-000001", query={ "term": { "ip_addr": "2001:db8::/48" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8bb5c57bdeff22be8e5f39a99dfe70e.asciidoc000066400000000000000000000013761506101734100271760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/sampler-aggregation.asciidoc:22 [source, python] ---- resp = client.search( index="stackoverflow", size="0", query={ "query_string": { "query": "tags:kibana OR tags:javascript" } }, aggs={ "sample": { "sampler": { "shard_size": 200 }, "aggs": { 
"keywords": { "significant_terms": { "field": "tags", "exclude": [ "kibana", "javascript" ] } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8c348cabe15dfe58ab4c3cc13a963fe.asciidoc000066400000000000000000000002631506101734100270600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-shards.asciidoc:78 [source, python] ---- resp = client.search_shards( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8cbe2269f3dff6b231e73119e81511d.asciidoc000066400000000000000000000003361506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/exists-query.asciidoc:20 [source, python] ---- resp = client.search( query={ "exists": { "field": "user" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8ea65153d7775f25b08dfdfe6954498.asciidoc000066400000000000000000000005301506101734100264560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/simple-query-string-query.asciidoc:245 [source, python] ---- resp = client.search( query={ "simple_query_string": { "query": "Will Smith", "fields": [ "title", "*_name" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e8f1c9ee003d115ec8f55e57990df6e4.asciidoc000066400000000000000000000003501506101734100266000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-category.asciidoc:154 [source, python] ---- resp = client.ml.get_categories( job_id="esxi_log", page={ "size": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e905543b281e9c41395304da76ed2ea3.asciidoc000066400000000000000000000002601506101734100263340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/troubleshooting.asciidoc:29 [source, python] ---- resp = client.indices.delete( index=".watches", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e930a572e8ddfdecc13498c04007b9e3.asciidoc000066400000000000000000000010301506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:97 [source, python] ---- resp = client.indices.create( index="openai-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 1536, "element_type": "float", "similarity": "dot_product" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e93ff228ab3e63738e1c83fdfb7424b9.asciidoc000066400000000000000000000006521506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:446 [source, python] ---- resp = client.search( query={ "match": { "user.id": "kimchy" } }, highlight={ "pre_tags": [ "" ], "post_tags": [ "" ], "fields": { "body": {} } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e95ba581b298cd7bb598374afbfed315.asciidoc000066400000000000000000000003071506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/frequent-item-sets-aggregation.asciidoc:173 [source, python] ---- resp = client.async_search.get( id="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e95e61988dc3073a007f7b7445dd233b.asciidoc000066400000000000000000000010721506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/lifecycle/tutorial-migrate-data-stream-from-ilm-to-dsl.asciidoc:192 
[source, python] ---- resp = client.indices.put_index_template( name="dsl-data-stream-template", index_patterns=[ "dsl-data-stream*" ], data_stream={}, priority=500, template={ "settings": { "index.lifecycle.name": "pre-dsl-ilm-policy", "index.lifecycle.prefer_ilm": False }, "lifecycle": { "data_retention": "7d" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e9625da419bff6470ffd9927c59ca159.asciidoc000066400000000000000000000003601506101734100265310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/rejected-requests.asciidoc:29 [source, python] ---- resp = client.cat.thread_pool( v=True, h="id,name,queue,active,rejected,completed", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e9738fe09a99080506a07945795e8eda.asciidoc000066400000000000000000000004271506101734100263210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/stop-tokenfilter.asciidoc:31 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "stop" ], text="a quick fox jumps over the lazy dog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e99c45a47dc0ba7440aea8a9a99c84fa.asciidoc000066400000000000000000000011321506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significanttext-aggregation.asciidoc:39 [source, python] ---- resp = client.search( index="news", query={ "match": { "content": "Bird flu" } }, aggregations={ "my_sample": { "sampler": { "shard_size": 100 }, "aggregations": { "keywords": { "significant_text": { "field": "content" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e9a0b450af6219772631703d602c7092.asciidoc000066400000000000000000000022731506101734100261120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/text-expansion-query.asciidoc:229 [source, python] ---- resp = client.search( index="my-index", query={ "text_expansion": { "ml.tokens": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?", "pruning_config": { "tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4, "only_score_pruned_tokens": False } } } }, rescore={ "window_size": 100, "query": { "rescore_query": { "text_expansion": { "ml.tokens": { "model_id": ".elser_model_2", "model_text": "How is the weather in Jamaica?", "pruning_config": { "tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4, "only_score_pruned_tokens": True } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e9f9e184499a793828233e536fac0487.asciidoc000066400000000000000000000004351506101734100262430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/delete-by-query.asciidoc:412 [source, python] ---- resp = client.delete_by_query( index="my-index-000001", scroll_size="5000", query={ "term": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/e9fc47015922d51c2b05e502ce9c622e.asciidoc000066400000000000000000000006451506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-google-ai-studio.asciidoc:103 [source, python] ---- resp = client.inference.put( task_type="completion", inference_id="google_ai_studio_completion", inference_config={ "service": "googleaistudio", "service_settings": { "api_key": "", "model_id": "" } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/e9fe3b53b5b6e1ff9566b5237c0fa513.asciidoc000066400000000000000000000020001506101734100265550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/children-aggregation.asciidoc:59 [source, python] ---- resp = client.index( index="child_example", id="2", routing="1", document={ "join": { "name": "answer", "parent": "1" }, "owner": { "location": "Norfolk, United Kingdom", "display_name": "Sam", "id": 48 }, "body": "Unfortunately you're pretty much limited to FTP...", "creation_date": "2009-05-04T13:45:37.030" }, ) print(resp) resp1 = client.index( index="child_example", id="3", routing="1", refresh=True, document={ "join": { "name": "answer", "parent": "1" }, "owner": { "location": "Norfolk, United Kingdom", "display_name": "Troll", "id": 49 }, "body": "Use Linux...", "creation_date": "2009-05-05T13:45:37.030" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/ea020ea32d5cd35e577c61a120f92451.asciidoc000066400000000000000000000016211506101734100263720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-a-data-stream.asciidoc:240 [source, python] ---- resp = client.bulk( index="my-data-stream", operations=[ { "create": {} }, { "@timestamp": "2099-05-06T16:21:15.000Z", "message": "192.0.2.42 - - [06/May/2099:16:21:15 +0000] \"GET /images/bg.jpg HTTP/1.0\" 200 24736" }, { "create": {} }, { "@timestamp": "2099-05-06T16:25:42.000Z", "message": "192.0.2.255 - - [06/May/2099:16:25:42 +0000] \"GET /favicon.ico HTTP/1.0\" 200 3638" } ], ) print(resp) resp1 = client.index( index="my-data-stream", document={ "@timestamp": "2099-05-06T16:21:15.000Z", "message": "192.0.2.42 - - [06/May/2099:16:21:15 +0000] \"GET /images/bg.jpg HTTP/1.0\" 200 24736" }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/ea29029884a5fd9a8d8830d25884bf07.asciidoc000066400000000000000000000004331506101734100263650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/parent-id-query.asciidoc:79 [source, python] ---- resp = client.search( index="my-index-000001", query={ "parent_id": { "type": "my-child", "id": "1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea313059c18d6edbd28c3f743a5e7c1c.asciidoc000066400000000000000000000010151506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/significantterms-aggregation.asciidoc:602 [source, python] ---- resp = client.search( query={ "match": { "city": "madrid" } }, aggs={ "tags": { "significant_terms": { "field": "tag", "background_filter": { "term": { "text": "spain" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea5391267ced860c00214c096e08c8d4.asciidoc000066400000000000000000000004241506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/update-settings.asciidoc:19 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "number_of_replicas": 2 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea5b4d2d87fd4e040afad18903c44869.asciidoc000066400000000000000000000013601506101734100265620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:185 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_bounding_box": { "pin.location": { "top_left": { "lat": 40.73, "lon": 
-74.1 }, "bottom_right": { "lat": 40.01, "lon": -71.12 } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea61aa2531ea73ccc0acd2d41f0518eb.asciidoc000066400000000000000000000013631506101734100267420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/rank-feature.asciidoc:11 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "pagerank": { "type": "rank_feature" }, "url_length": { "type": "rank_feature", "positive_score_impact": False } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "pagerank": 8, "url_length": 22 }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "rank_feature": { "field": "pagerank" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ea66a620c23337545e409c120c4ed5d9.asciidoc000066400000000000000000000002711506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/ilm-tutorial.asciidoc:207 [source, python] ---- resp = client.ilm.explain_lifecycle( index=".ds-timeseries-*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea68e3428cc2ca3455bf312d09451489.asciidoc000066400000000000000000000007171506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:1244 [source, python] ---- resp = client.indices.create( index="product-index", mappings={ "properties": { "product-vector": { "type": "dense_vector", "dims": 5, "index": False }, "price": { "type": "long" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea690283f301c6ce957efad93d7d5c5d.asciidoc000066400000000000000000000007411506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/length-tokenfilter.asciidoc:109 [source, python] ---- resp = client.indices.create( index="length_example", settings={ "analysis": { "analyzer": { "standard_length": { "tokenizer": "standard", "filter": [ "length" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ea8c4229afa6dd4f1321355542be9912.asciidoc000066400000000000000000000014111506101734100264100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/attachment.asciidoc:268 [source, python] ---- resp = client.ingest.put_pipeline( id="attachment", description="Extract attachment information", processors=[ { "attachment": { "field": "data", "indexed_chars": 11, "indexed_chars_field": "max_size", "remove_binary": True } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="attachment", document={ "data": "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0=" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ea92390651e8ecad0c890658985343c5.asciidoc000066400000000000000000000007371506101734100263070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:557 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="hourly-snapshots", name="", schedule="0 0 * * * ?", repository="my_repository", config={ "indices": "*", "include_global_state": True }, retention={ "expire_after": "1d", "min_count": 1, "max_count": 24 }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/eab3cad0257c539c5efd2689aa52f242.asciidoc000066400000000000000000000003311506101734100266270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:111 [source, python] ---- resp = client.indices.data_streams_stats( name="my-data-stream", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eac3bc428d03eb4926fa51f74b9bc4d5.asciidoc000066400000000000000000000031431506101734100267140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/highlighting.asciidoc:354 [source, python] ---- resp = client.search( query={ "match": { "comment": { "query": "foo bar" } } }, rescore={ "window_size": 50, "query": { "rescore_query": { "match_phrase": { "comment": { "query": "foo bar", "slop": 1 } } }, "rescore_query_weight": 10 } }, source=False, highlight={ "order": "score", "fields": { "comment": { "fragment_size": 150, "number_of_fragments": 3, "highlight_query": { "bool": { "must": { "match": { "comment": { "query": "foo bar" } } }, "should": { "match_phrase": { "comment": { "query": "foo bar", "slop": 1, "boost": 10 } } }, "minimum_should_match": 0 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ead4d875877d618594d0cdbdd9b7998b.asciidoc000066400000000000000000000004171506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/add-nodes.asciidoc:170 [source, python] ---- resp = client.cluster.delete_voting_config_exclusions() print(resp) resp1 = client.cluster.delete_voting_config_exclusions( wait_for_removal=False, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/eada8af6588584ac88f1e5b15f4a5c2a.asciidoc000066400000000000000000000023261506101734100267320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/valuecount-aggregation.asciidoc:97 [source, python] ---- resp = client.index( index="metrics_index", id="1", document={ "network.name": "net-1", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 3, 7, 23, 12, 6 ] } }, ) print(resp) resp1 = client.index( index="metrics_index", id="2", document={ "network.name": "net-2", "latency_histo": { "values": [ 0.1, 0.2, 0.3, 0.4, 0.5 ], "counts": [ 8, 17, 8, 7, 6 ] } }, ) print(resp1) resp2 = client.search( index="metrics_index", size="0", aggs={ "total_requests": { "value_count": { "field": "latency_histo" } } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/eae8931d01b3b878dd0c45214121e662.asciidoc000066400000000000000000000005441506101734100263260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:329 [source, python] ---- resp = client.search( index="my_locations", query={ "geo_bounding_box": { "pin.location": { "top_left": "dr", "bottom_right": "dr" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eaf53b05959cc6b7fb09579baf34de68.asciidoc000066400000000000000000000021231506101734100266600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline.asciidoc:127 [source, python] ---- resp = client.search( aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sale_type": { "terms": { "field": "type" }, "aggs": { "sales": { "sum": { "field": "price" } } } }, "hat_vs_bag_ratio": { "bucket_script": { "buckets_path": { "hats": "sale_type['hat']>sales", "bags": 
"sale_type['bag']>sales" }, "script": "params.hats / params.bags" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eaf6a846ded090fd6ac48269ad2b328b.asciidoc000066400000000000000000000006061506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-rollover.asciidoc:38 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index.lifecycle.name": "my_policy", "index.lifecycle.rollover_alias": "my_data" }, aliases={ "my_data": { "is_write_index": True } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eafdabe80b21b90495555fa6d9089412.asciidoc000066400000000000000000000004241506101734100265010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-service-token-caches.asciidoc:68 [source, python] ---- resp = client.security.clear_cached_service_tokens( namespace="elastic", service="fleet-server", name="token1,token2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb09235533a1c65a0627ba05f7d4ad4d.asciidoc000066400000000000000000000011551506101734100264570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/context-suggest.asciidoc:253 [source, python] ---- resp = client.index( index="place", id="1", document={ "suggest": { "input": "timmy's", "contexts": { "location": [ { "lat": 43.6624803, "lon": -79.3863353 }, { "lat": 43.6624718, "lon": -79.3873227 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb141f8df8ead40ff7440b623ea92267.asciidoc000066400000000000000000000010621506101734100265600ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/copy-to.asciidoc:94 [source, python] ---- resp = client.indices.create( index="good_example_index", mappings={ "properties": { "field_1": { "type": "text", "copy_to": [ "field_2", "field_3" ] }, "field_2": { "type": "text" }, "field_3": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb14cedd3bdda9ffef3c118f3d528dcd.asciidoc000066400000000000000000000003401506101734100272740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/update.asciidoc:178 [source, python] ---- resp = client.update( index="test", id="1", script="ctx._source.new_field = 'value_of_new_field'", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb33a7e5a0fe83fdaa0f79354f659428.asciidoc000066400000000000000000000006411506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:740 [source, python] ---- resp = client.indices.put_mapping( index="my-index-000001", runtime={ "client_ip": { "type": "ip", "script": { "source": "String m = doc[\"message\"].value; int end = m.indexOf(\" \"); emit(m.substring(0, end));" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb4e43b47867b54214a8630172dd0e21.asciidoc000066400000000000000000000003451506101734100262470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-forecast.asciidoc:75 [source, python] ---- resp = client.ml.delete_forecast( job_id="total-requests", forecast_id="_all", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb54506fbc71a7d250e86b22d0600114.asciidoc000066400000000000000000000003311506101734100263050ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/list-connectors-api.asciidoc:117 [source, python] ---- resp = 
client.connector.list( service_type="sharepoint_online,google_drive", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb5486d2fe4283475bf9e0e09280be16.asciidoc000066400000000000000000000006431506101734100264350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-forcemerge.asciidoc:64 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "forcemerge": { "max_num_segments": 1 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb5987b58dae90c3a8a1609410be0570.asciidoc000066400000000000000000000021601506101734100264130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1092 [source, python] ---- resp = client.indices.create( index="indonesian_example", settings={ "analysis": { "filter": { "indonesian_stop": { "type": "stop", "stopwords": "_indonesian_" }, "indonesian_keywords": { "type": "keyword_marker", "keywords": [ "contoh" ] }, "indonesian_stemmer": { "type": "stemmer", "language": "indonesian" } }, "analyzer": { "rebuilt_indonesian": { "tokenizer": "standard", "filter": [ "lowercase", "indonesian_stop", "indonesian_keywords", "indonesian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb6d62f1d855a8e8fe9eab2656d47504.asciidoc000066400000000000000000000014531506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/phrase-suggest.asciidoc:410 [source, python] ---- resp = client.search( index="test", suggest={ "text": "obel prize", "simple_phrase": { "phrase": { "field": "title.trigram", "size": 1, "direct_generator": [ { "field": "title.trigram", "suggest_mode": "always" }, { "field": "title.reverse", "suggest_mode": "always", "pre_filter": "reverse", "post_filter": "reverse" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb964d8d7f27c057a4542448ba5b74e4.asciidoc000066400000000000000000000004771506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/get-snapshot-api.asciidoc:488 [source, python] ---- resp = client.snapshot.get( repository="my_repository", snapshot="snapshot*", size="2", sort="name", after="c25hcHNob3RfMixteV9yZXBvc2l0b3J5LHNuYXBzaG90XzI=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb96d7dd5f3116a50f7a86b729f1a934.asciidoc000066400000000000000000000005301506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-scheduling-api.asciidoc:126 [source, python] ---- resp = client.connector.update_scheduling( connector_id="my-connector", scheduling={ "full": { "enabled": True, "interval": "0 10 0 * * ?" 
} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eb9a41f7fc8bdf5559bb9db822ae3a65.asciidoc000066400000000000000000000045531506101734100270210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/bulk-create-roles.asciidoc:236 [source, python] ---- resp = client.security.bulk_put_role( roles={ "my_admin_role": { "cluster": [ "bad_cluster_privilege" ], "indices": [ { "names": [ "index1", "index2" ], "privileges": [ "all" ], "field_security": { "grant": [ "title", "body" ] }, "query": "{\"match\": {\"title\": \"foo\"}}" } ], "applications": [ { "application": "myapp", "privileges": [ "admin", "read" ], "resources": [ "*" ] } ], "run_as": [ "other_user" ], "metadata": { "version": 1 } }, "my_user_role": { "cluster": [ "all" ], "indices": [ { "names": [ "index1" ], "privileges": [ "read" ], "field_security": { "grant": [ "title", "body" ] }, "query": "{\"match\": {\"title\": \"foo\"}}" } ], "applications": [ { "application": "myapp", "privileges": [ "admin", "read" ], "resources": [ "*" ] } ], "run_as": [ "other_user" ], "metadata": { "version": 1 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ebb1c7554e91adb4552599f3e5de1865.asciidoc000066400000000000000000000004211506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/split-index.asciidoc:90 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "index": { "number_of_routing_shards": 30 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ebd76a45e153c4656c5871e23b7b5508.asciidoc000066400000000000000000000003401506101734100263420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/delete-app-privileges.asciidoc:47 [source, python] ---- resp = client.security.delete_privileges( application="myapp", name="read", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ebef3dc8ed1766d433a5cffc40fde7ae.asciidoc000066400000000000000000000003031506101734100272210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:289 [source, python] ---- resp = client.ilm.remove_policy( index="logs-my_app-default", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec0e50f78390b8622cef4e0b0cd45967.asciidoc000066400000000000000000000004111506101734100265000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql-search-api.asciidoc:586 [source, python] ---- resp = client.eql.search( index="my-data-stream", query="\n process where (process.name == \"cmd.exe\" and process.pid != 2013)\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec135f0cc0d3f526df68000b2a95c65b.asciidoc000066400000000000000000000004251506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // migration/migrate_9_0.asciidoc:403 [source, python] ---- resp = client.indices.create_from( source=".ml-anomalies-custom-example", dest=".reindexed-v9-ml-anomalies-custom-example", create_from=None, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec195297eb804cba1cb19c9926773059.asciidoc000066400000000000000000000005171506101734100263570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/set-up-lifecycle-policy.asciidoc:265 [source, python] ---- resp = client.indices.put_settings( index="mylogs-pre-ilm*", settings={ "index": { "lifecycle": { "name": "mylogs_policy_existing" } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/ec420b28e327f332c9e99d6040c4eb3f.asciidoc000066400000000000000000000005351506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/geo-match-enrich-policy-type-ex.asciidoc:117 [source, python] ---- resp = client.index( index="users", id="0", pipeline="postal_lookup", document={ "first_name": "Mardy", "last_name": "Brown", "geo_location": "POINT (13.5 52.5)" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec44999b6618ac6bbacb23eb08c0fa88.asciidoc000066400000000000000000000014601506101734100267230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/common-script-uses.asciidoc:324 [source, python] ---- resp = client.search( index="my-index", runtime_mappings={ "gc_size": { "type": "keyword", "script": "\n Map gc=dissect('[%{@timestamp}][%{code}][%{desc}] %{ident} used %{usize}, capacity %{csize}, committed %{comsize}, reserved %{rsize}').extract(doc[\"gc.keyword\"].value);\n if (gc != null) emit(\"used\" + ' ' + gc.usize + ', ' + \"capacity\" + ' ' + gc.csize + ', ' + \"committed\" + ' ' + gc.comsize);\n " } }, size=1, aggs={ "sizes": { "terms": { "field": "gc_size", "size": 10 } } }, fields=[ "gc_size" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec4b43c3ebd8816799fa004596b2f0cb.asciidoc000066400000000000000000000004661506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/slowlog.asciidoc:232 [source, python] ---- resp = client.indices.put_settings( index="*", settings={ "index.indexing.slowlog.include.user": True, "index.indexing.slowlog.threshold.index.warn": "30s" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec5a2ce156c36aaa267fa31dd9367307.asciidoc000066400000000000000000000006151506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/checkpoints.asciidoc:80 [source, python] ---- resp = client.ingest.put_pipeline( id="set_ingest_time", description="Set ingest timestamp.", processors=[ { "set": { "field": "event.ingested", "value": "{{{_ingest.timestamp}}}" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec69543e39c1f6afb5aff6fb9adc400d.asciidoc000066400000000000000000000010171506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/highlighting-multi-fields.asciidoc:29 [source, python] ---- resp = client.bulk( index="index1", refresh=True, operations=[ { "index": { "_id": "doc1" } }, { "comment": "run with scissors" }, { "index": { "_id": "doc2" } }, { "comment": "running with scissors" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ec736c31f49c54e5424efa2e53b22906.asciidoc000066400000000000000000000013001506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/user-agent.asciidoc:31 [source, python] ---- resp = client.ingest.put_pipeline( id="user_agent", description="Add user agent information", processors=[ { "user_agent": { "field": "agent" } } ], ) print(resp) resp1 = client.index( index="my-index-000001", id="my_id", pipeline="user_agent", document={ "agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36" }, ) print(resp1) resp2 = client.get( index="my-index-000001", id="my_id", ) print(resp2) ---- 
python-elasticsearch-9.1.1/docs/examples/ec8f176ebf436d5719bdeca4a9ea8220.asciidoc000066400000000000000000000012361506101734100267270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/multi-terms-aggregation.asciidoc:172 [source, python] ---- resp = client.search( index="products", runtime_mappings={ "genre.length": { "type": "long", "script": "emit(doc['genre'].value.length())" } }, aggs={ "genres_and_products": { "multi_terms": { "terms": [ { "field": "genre.length" }, { "field": "product" } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ecc57597f6b791d1151ad79d9f4ce67b.asciidoc000066400000000000000000000007031506101734100266100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:643 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "date_histogram": { "field": "date", "calendar_interval": "1M", "format": "yyyy-MM-dd", "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ece01f9382e450f669c0e0925e5b30e5.asciidoc000066400000000000000000000011401506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/daterange-aggregation.asciidoc:305 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "range": { "date_range": { "field": "date", "format": "MM-yyy", "ranges": [ { "to": "now-10M/M" }, { "from": "now-10M/M" } ], "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ecfd0d94dd14ef05dfa861f22544b388.asciidoc000066400000000000000000000003761506101734100266520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-error-api.asciidoc:87 [source, python] ---- resp = client.connector.update_error( connector_id="my-connector", error="Houston, we have a problem!", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed01b542bb56b1521ea8d5a3c67aa891.asciidoc000066400000000000000000000005531506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/repository-gcs.asciidoc:142 [source, python] ---- resp = client.snapshot.create_repository( name="my_gcs_repository", repository={ "type": "gcs", "settings": { "bucket": "my_bucket", "client": "my_alternate_client" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed01d27b8f80bb4ea54bf4e32b8d6258.asciidoc000066400000000000000000000016021506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/geodistance-aggregation.asciidoc:203 [source, python] ---- resp = client.search( index="museums", size="0", aggs={ "rings_around_amsterdam": { "geo_distance": { "field": "location", "origin": "POINT (4.894 52.3760)", "ranges": [ { "to": 100000, "key": "first_ring" }, { "from": 100000, "to": 300000, "key": "second_ring" }, { "from": 300000, "key": "third_ring" } ], "keyed": True } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed09432c6069e41409f0a5e0d1d3842a.asciidoc000066400000000000000000000004611506101734100263250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/apis/reload-analyzers.asciidoc:16 [source, python] ---- resp = client.indices.reload_search_analyzers( index="my-index-000001", ) print(resp) resp1 = client.indices.clear_cache( index="my-index-000001", request=True, ) print(resp1) ---- 
python-elasticsearch-9.1.1/docs/examples/ed12eeadb4e530b53c4975dadaa06054.asciidoc000066400000000000000000000002751506101734100267000ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/grok.asciidoc:281 [source, python] ---- resp = client.ingest.processor_grok( ecs_compatibility="v1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed250b74bc77c15bb794f55a12d762c3.asciidoc000066400000000000000000000002621506101734100264740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // setup/sysconfig/swap.asciidoc:77 [source, python] ---- resp = client.nodes.info( filter_path="**.mlockall", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed27843eff311f3011b679e97e6fda50.asciidoc000066400000000000000000000005341506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:647 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="my_snapshot_2099.05.06", indices="my-index,logs-my_app-default", index_settings={ "index.number_of_replicas": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed3bdf4d6799b43526851e92b6a60c55.asciidoc000066400000000000000000000003501506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-field-mapping.asciidoc:135 [source, python] ---- resp = client.indices.get_field_mapping( index="publications", fields="author.id,abstract,name", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed5bfa68d01e079aac94de78dc5caddf.asciidoc000066400000000000000000000002251506101734100272240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/master.asciidoc:57 [source, python] ---- resp = client.cat.master( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed5c3b45e8de912faba44507d827eb93.asciidoc000066400000000000000000000006651506101734100266540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:501 [source, python] ---- resp = client.search( sort=[ { "_geo_distance": { "pin.location": "POINT (-70 40)", "order": "asc", "unit": "km" } } ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed60daeaec351fc8b3f39a3dfad6fc4e.asciidoc000066400000000000000000000010411506101734100272700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-mapping.asciidoc:275 [source, python] ---- resp = client.indices.create( index="amazon-bedrock-embeddings", mappings={ "properties": { "content_embedding": { "type": "dense_vector", "dims": 1024, "element_type": "float", "similarity": "dot_product" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed688d86eeaa4d7969acb0f574eb917f.asciidoc000066400000000000000000000005121506101734100267520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:495 [source, python] ---- resp = client.index( index="my_queries1", id="1", refresh=True, document={ "query": { "term": { "my_field.prefix": "abc" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed6b996ea389e0955a01c2e67f4c8339.asciidoc000066400000000000000000000003341506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:101 [source, 
python] ---- resp = client.field_caps( index="my-index-000001", fields="my-field", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed7fa1971ac322aeccd6391ab32d0490.asciidoc000066400000000000000000000004311506101734100266140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/increase-master-node-capacity.asciidoc:83 [source, python] ---- resp = client.cat.nodes( v=True, h="name,master,node.role,disk.used_percent,disk.used,disk.avail,disk.total", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ed85ed833bec7286a0dfbe64077c5715.asciidoc000066400000000000000000000021111506101734100265660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:530 [source, python] ---- resp = client.indices.create( index="danish_example", settings={ "analysis": { "filter": { "danish_stop": { "type": "stop", "stopwords": "_danish_" }, "danish_keywords": { "type": "keyword_marker", "keywords": [ "eksempel" ] }, "danish_stemmer": { "type": "stemmer", "language": "danish" } }, "analyzer": { "rebuilt_danish": { "tokenizer": "standard", "filter": [ "lowercase", "danish_stop", "danish_keywords", "danish_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/edae616e1244babf6032aecc6aaaf836.asciidoc000066400000000000000000000007661506101734100270450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:474 [source, python] ---- resp = client.search( sort=[ { "_geo_distance": { "pin.location": { "lat": 40, "lon": -70 }, "order": "asc", "unit": "km" } } ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/edb25dc0162b039d477cb06aed2d6275.asciidoc000066400000000000000000000024011506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/sparse-vector-query.asciidoc:152 [source, python] ---- resp = client.search( index="my-index", query={ "bool": { "should": [ { "sparse_vector": { "field": "ml.inference.title_expanded.predicted_value", "inference_id": "my-elser-model", "query": "How is the weather in Jamaica?", "boost": 1 } }, { "sparse_vector": { "field": "ml.inference.description_expanded.predicted_value", "inference_id": "my-elser-model", "query": "How is the weather in Jamaica?", "boost": 1 } }, { "multi_match": { "query": "How is the weather in Jamaica?", "fields": [ "title", "description" ], "boost": 4 } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/edb5cad890208014ecd91f3f739ce193.asciidoc000066400000000000000000000003071506101734100265630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/set-up-tsds.asciidoc:276 [source, python] ---- resp = client.indices.rollover( alias="metrics-weather_sensors-dev", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/edcfadbfb14d97a2f5e6e21ef7039818.asciidoc000066400000000000000000000017131506101734100270120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:41 [source, python] ---- resp = client.search( query={ "function_score": { "query": { "match_all": {} }, "boost": "5", "functions": [ { "filter": { "match": { "test": "bar" } }, "random_score": {}, "weight": 23 }, { "filter": { "match": { "test": "cat" } }, "weight": 42 } ], "max_boost": 42, "score_mode": "max", "boost_mode": "multiply", "min_score": 42 } }, ) print(resp) 
---- python-elasticsearch-9.1.1/docs/examples/ee08328cd157d547de19b4abe867b23e.asciidoc000066400000000000000000000002351506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // alias.asciidoc:277 [source, python] ---- resp = client.indices.get_alias( name="logs", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ee0fd67acc807f1bddf5e9807c06e7eb.asciidoc000066400000000000000000000063151506101734100270770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/weighted-tokens-query.asciidoc:86 [source, python] ---- resp = client.search( index="my-index", query={ "weighted_tokens": { "query_expansion_field": { "tokens": { "2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012 }, "pruning_config": { "tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4, "only_score_pruned_tokens": False } } } }, rescore={ "window_size": 100, "query": { "rescore_query": { "weighted_tokens": { "query_expansion_field": { "tokens": { "2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012 }, "pruning_config": { "tokens_freq_ratio_threshold": 5, "tokens_weight_threshold": 0.4, "only_score_pruned_tokens": True } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ee223e604bb695cad2517d28ae63ac34.asciidoc000066400000000000000000000017061506101734100265510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rrf.asciidoc:53 [source, python] ---- resp = client.search( index="example-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "term": { "text": "shoes" } } } }, { "knn": { "field": "vector", "query_vector": [ 1.25, 2, 3.5 ], "k": 50, "num_candidates": 100 } } ], "rank_window_size": 50, "rank_constant": 20 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ee2d97090d617ed8aa2a87ea33556dd7.asciidoc000066400000000000000000000004461506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/truncate-tokenfilter.asciidoc:24 [source, python] ---- resp = client.indices.analyze( tokenizer="whitespace", filter=[ "truncate" ], text="the quinquennial extravaganza carried on", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ee577c4c7cc723e99569ea2d1137adba.asciidoc000066400000000000000000000003151506101734100266500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-roles-cache.asciidoc:48 [source, python] ---- resp = client.security.clear_cached_roles( name="my_admin_role", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ee634d59def6302134d24fa90e18b609.asciidoc000066400000000000000000000007671506101734100264340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
autoscaling/deciders/machine-learning-decider.asciidoc:48 [source, python] ---- resp = client.autoscaling.put_autoscaling_policy( name="my_autoscaling_policy", policy={ "roles": [ "ml" ], "deciders": { "ml": { "num_anomaly_jobs_in_queue": 5, "num_analytics_jobs_in_queue": 3, "down_scale_delay": "30m" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ee90d1fb22b59d30da339d825303b912.asciidoc000066400000000000000000000012431506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/put-app-privileges.asciidoc:136 [source, python] ---- resp = client.security.put_privileges( privileges={ "app01": { "read": { "actions": [ "action:login", "data:read/*" ] }, "write": { "actions": [ "action:login", "data:write/*" ] } }, "app02": { "all": { "actions": [ "*" ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eeb35b759bd239bb773c8ebd5fe63d05.asciidoc000066400000000000000000000010041506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geocentroid-aggregation.asciidoc:79 [source, python] ---- resp = client.search( index="museums", size="0", aggs={ "cities": { "terms": { "field": "city.keyword" }, "aggs": { "centroid": { "geo_centroid": { "field": "location" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eec051555c8050d017d3fe38ea59e3a0.asciidoc000066400000000000000000000004241506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search.asciidoc:915 [source, python] ---- resp = client.search( index="my-index-000001", from_="40", size="20", query={ "term": { "user.id": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eed37703cfe8fec093ed5a42210a6ffd.asciidoc000066400000000000000000000014451506101734100270030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/rollup-getting-started.asciidoc:38 [source, python] ---- resp = client.rollup.put_job( id="sensor", index_pattern="sensor-*", rollup_index="sensor_rollup", cron="*/30 * * * * ?", page_size=1000, groups={ "date_histogram": { "field": "timestamp", "fixed_interval": "60m" }, "terms": { "fields": [ "node" ] } }, metrics=[ { "field": "temperature", "metrics": [ "min", "max", "sum" ] }, { "field": "voltage", "metrics": [ "avg" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eee6110831c08b9c1b3f56b24656e95b.asciidoc000066400000000000000000000006431506101734100264220ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-hugging-face.asciidoc:107 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="hugging-face-embeddings", inference_config={ "service": "hugging_face", "service_settings": { "api_key": "", "url": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eef9deff7f9799d1f7657bb7e2afb7f1.asciidoc000066400000000000000000000003201506101734100271230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/restore-snapshot.asciidoc:429 [source, python] ---- resp = client.indices.delete( index="*", expand_wildcards="all", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef10e8d07d9fae945e035d5dee1e9754.asciidoc000066400000000000000000000006601506101734100266620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/flatten-graph-tokenfilter.asciidoc:118 [source, python] ---- resp = 
client.indices.analyze( tokenizer="standard", filter=[ { "type": "synonym_graph", "synonyms": [ "dns, domain name system" ] }, "flatten_graph" ], text="domain name system is fragile", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef22234b97cc06d7dd620b4ce7c97b31.asciidoc000066400000000000000000000004111506101734100265510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:700 [source, python] ---- resp = client.reindex( max_docs=1, source={ "index": "my-index-000001" }, dest={ "index": "my-new-index-000001" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef33b3b373f7040b874146599db5d557.asciidoc000066400000000000000000000003601506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:179 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", filter=[ "lowercase" ], text="this is a test", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef3666b5d288faefbcbc4a25e8f506da.asciidoc000066400000000000000000000002771506101734100270740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:84 [source, python] ---- resp = client.count( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef46c42d473b2acc151a6a41272e0f14.asciidoc000066400000000000000000000011461506101734100264520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:661 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic": "runtime", "runtime": { "day_of_week": { "type": "keyword", "script": { "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" } } }, "properties": { "@timestamp": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef643bab44e7de6ddddde23a2eece5c7.asciidoc000066400000000000000000000005451506101734100273040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/getting-started.asciidoc:283 [source, python] ---- resp = client.index( index="books", document={ "name": "The Great Gatsby", "author": "F. 
Scott Fitzgerald", "release_date": "1925-04-10", "page_count": 180, "language": "EN" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef779b87b3b0fb6e6bae9c8875e3a1cf.asciidoc000066400000000000000000000013001506101734100270160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:699 [source, python] ---- resp = client.search( index="sales", size="0", runtime_mappings={ "date.promoted_is_tomorrow": { "type": "date", "script": "\n long date = doc['date'].value.toInstant().toEpochMilli();\n if (doc['promoted'].value) {\n date += 86400;\n }\n emit(date);\n " } }, aggs={ "sales_over_time": { "date_histogram": { "field": "date.promoted_is_tomorrow", "calendar_interval": "1M" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef867e563cbffe7866769a096b5d7a92.asciidoc000066400000000000000000000013241506101734100265460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/cumulative-sum-aggregation.asciidoc:40 [source, python] ---- resp = client.search( index="sales", size=0, aggs={ "sales_per_month": { "date_histogram": { "field": "date", "calendar_interval": "month" }, "aggs": { "sales": { "sum": { "field": "price" } }, "cumulative_sales": { "cumulative_sum": { "buckets_path": "sales" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef8f30e85e12e9a5a8817d28977598e4.asciidoc000066400000000000000000000011611506101734100264040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/range-aggregation.asciidoc:13 [source, python] ---- resp = client.search( index="sales", aggs={ "price_ranges": { "range": { "field": "price", "ranges": [ { "to": 100 }, { "from": 100, "to": 200 }, { "from": 200 } ] } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ef9c29759459904fef162acd223462c4.asciidoc000066400000000000000000000003141506101734100263560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-stats.asciidoc:2595 [source, python] ---- resp = client.nodes.stats( metric="ingest", filter_path="nodes.*.ingest", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/efa146bf81a9351ba42b92a6decbcfee.asciidoc000066400000000000000000000010561506101734100271310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/common-script-uses.asciidoc:173 [source, python] ---- resp = client.indices.put_mapping( index="my-index", runtime={ "http.response": { "type": "long", "script": "\n String response=dissect('%{clientip} %{ident} %{auth} [%{@timestamp}] \"%{verb} %{request} HTTP/%{httpversion}\" %{response} %{size}').extract(doc[\"message\"].value)?.response;\n if (response != null) emit(Integer.parseInt(response));\n " } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/efa924638043f3a6b23ccb824d757eba.asciidoc000066400000000000000000000010301506101734100265470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/multivalued-fields.asciidoc:11 [source, python] ---- resp = client.bulk( index="mv", refresh=True, operations=[ { "index": {} }, { "a": 1, "b": [ 2, 1 ] }, { "index": {} }, { "a": 2, "b": 3 } ], ) print(resp) resp1 = client.esql.query( query="FROM mv | LIMIT 2", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/efbd4936cca1a752493d8fa2ba6ad1a3.asciidoc000066400000000000000000000011101506101734100267550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
mapping/runtime.asciidoc:130 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "runtime": { "day_of_week": { "type": "keyword", "script": { "source": "emit(doc['@timestamp'].value.dayOfWeekEnum.getDisplayName(TextStyle.FULL, Locale.ENGLISH))" } } }, "properties": { "@timestamp": { "type": "date" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eff2fc92d46eb3c8f4d424eed18f54a2.asciidoc000066400000000000000000000005671506101734100270210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:19 [source, python] ---- resp = client.search( query={ "function_score": { "query": { "match_all": {} }, "boost": "5", "random_score": {}, "boost_mode": "multiply" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/eff8ecaed1ed084909c64450fc363a20.asciidoc000066400000000000000000000003531506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/update-settings.asciidoc:101 [source, python] ---- resp = client.cluster.put_settings( transient={ "indices.recovery.max_bytes_per_sec": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f03352bb1129938a89f97e4b650038dd.asciidoc000066400000000000000000000010211506101734100262610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:223 [source, python] ---- resp = client.ingest.put_pipeline( id="amazon_bedrock_embeddings_pipeline", processors=[ { "inference": { "model_id": "amazon_bedrock_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f04e1284d09ceb4443d67b2ef9c7f476.asciidoc000066400000000000000000000003511506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/delete-snapshot-api.asciidoc:36 [source, python] ---- resp = client.snapshot.delete( repository="my_repository", snapshot="my_snapshot", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f0816beb8ac21cb0940858b72f6b1946.asciidoc000066400000000000000000000002641506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/fielddata.asciidoc:132 [source, python] ---- resp = client.cat.fielddata( fields="body,soul", v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f097c02541056f3c0fc855e7bbeef8a8.asciidoc000066400000000000000000000021231506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:1746 [source, python] ---- resp = client.indices.create( index="swedish_example", settings={ "analysis": { "filter": { "swedish_stop": { "type": "stop", "stopwords": "_swedish_" }, "swedish_keywords": { "type": "keyword_marker", "keywords": [ "exempel" ] }, "swedish_stemmer": { "type": "stemmer", "language": "swedish" } }, "analyzer": { "rebuilt_swedish": { "tokenizer": "standard", "filter": [ "lowercase", "swedish_stop", "swedish_keywords", "swedish_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f09817fd13ff3dce52eb79d0722409c3.asciidoc000066400000000000000000000015331506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/percolator.asciidoc:115 [source, python] ---- resp = client.indices.create( index="new_index", mappings={ "properties": { "query": { "type": 
"percolator" }, "body": { "type": "text" } } }, ) print(resp) resp1 = client.reindex( refresh=True, source={ "index": "index" }, dest={ "index": "new_index" }, ) print(resp1) resp2 = client.indices.update_aliases( actions=[ { "remove": { "index": "index", "alias": "queries" } }, { "add": { "index": "new_index", "alias": "queries" } } ], ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/f0bfc8d7ab4eb94ea5fdf2e087d8cf5b.asciidoc000066400000000000000000000011731506101734100272340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/boxplot-aggregation.asciidoc:83 [source, python] ---- resp = client.search( index="latency", size=0, runtime_mappings={ "load_time.seconds": { "type": "long", "script": { "source": "emit(doc['load_time'].value / params.timeUnit)", "params": { "timeUnit": 1000 } } } }, aggs={ "load_time_boxplot": { "boxplot": { "field": "load_time.seconds" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f0c3235d8fce641d6ff8ce90ab7b7b8b.asciidoc000066400000000000000000000005141506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-termvectors.asciidoc:120 [source, python] ---- resp = client.mtermvectors( index="my-index-000001", ids=[ "1", "2" ], parameters={ "fields": [ "message" ], "term_statistics": True }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f10ab582387b2c157917a60205c993f7.asciidoc000066400000000000000000000005761506101734100262110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/meta.asciidoc:9 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "latency": { "type": "long", "meta": { "unit": "ms" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f128a9dff5051b47efe2c53c4454a68f.asciidoc000066400000000000000000000005261506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/rollover-index.asciidoc:261 [source, python] ---- resp = client.indices.rollover( alias="my-data-stream", conditions={ "max_age": "7d", "max_docs": 1000, "max_primary_shard_size": "50gb", "max_primary_shard_docs": "2000" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f14d0e4a280fee540e8e5f0fc4d0e9f1.asciidoc000066400000000000000000000005331506101734100267160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-grid-query.asciidoc:174 [source, python] ---- resp = client.search( index="my_locations", size=0, aggs={ "grouped": { "geotile_grid": { "field": "location", "precision": 6 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1508a2221152842894819e762e63491.asciidoc000066400000000000000000000004651506101734100257140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:696 [source, python] ---- resp = client.sql.query( format="json", keep_on_completion=True, wait_for_completion_timeout="2s", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f160561efab38e40c2feebf5a2542ab5.asciidoc000066400000000000000000000003261506101734100267020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/nodes-stats.asciidoc:2603 [source, python] ---- resp = client.nodes.stats( metric="ingest", filter_path="nodes.*.ingest.pipelines", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f18248c181690b81d090275b072f0070.asciidoc000066400000000000000000000004401506101734100260230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1351 [source, python] ---- resp = client.eql.search( index="my-data-stream", keep_alive="2d", wait_for_completion_timeout="2s", query="\n process where process.name == \"cmd.exe\"\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f187ac2dc35425cb0ef48f328cc7e435.asciidoc000066400000000000000000000004601506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-cert.asciidoc:195 [source, python] ---- resp = client.security.put_user( username="cross-search-user", password="l0ng-r4nd0m-p@ssw0rd", roles=[ "remote-search" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1b24217b1d9ba6ea5e4fa6e6f412022.asciidoc000066400000000000000000000006041506101734100265350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/post-inference.asciidoc:138 [source, python] ---- resp = client.inference.inference( task_type="rerank", inference_id="cohere_rerank", input=[ "luke", "like", "leia", "chewy", "r2d2", "star", "wars" ], query="star wars main character", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1bf0c03581b79c3324cfa3246a60e4d.asciidoc000066400000000000000000000007431506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:183 [source, python] ---- resp = client.indices.create( index="my-byte-quantized-index", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 64, "index": True, "index_options": { "type": "bbq_hnsw" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1bf3edbd9e6c7e01b00c74c99a58b61.asciidoc000066400000000000000000000010221506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1454 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "cluster_one": { "seeds": [ "127.0.0.1:9300" ] }, "cluster_two": { "seeds": [ "127.0.0.1:9301" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1d2b8169160adfd27f32988113f0f9f.asciidoc000066400000000000000000000007551506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc:148 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_analyzer": { "tokenizer": "keyword", "filter": [ "word_delimiter" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1dc6f69453867ffafe86e998dd464d9.asciidoc000066400000000000000000000005221506101734100266370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/pathhierarchy-tokenizer.asciidoc:309 [source, python] ---- resp = client.search( index="file-path-test", query={ "term": { "file_path.tree_reversed": { "value": "my_photo1.jpg" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f1e2af6dbb30fc5335e7d0b5507a2a93.asciidoc000066400000000000000000000003011506101734100266120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/reset-job.asciidoc:62 [source, python] ---- resp = client.ml.reset_job( job_id="total-requests", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f2175feadc2abe545899889e6d4ffcad.asciidoc000066400000000000000000000003071506101734100270300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // slm/apis/slm-get.asciidoc:77 [source, python] ---- resp = client.slm.get_lifecycle( policy_id="daily-snapshots", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f235544a883fd04bed2dc369b0c450f3.asciidoc000066400000000000000000000004651506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:409 [source, python] ---- resp = client.sql.query( format="txt", query="SELECT * FROM library", filter={ "terms": { "_routing": [ "abc" ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2359acfb6eaa919125463cc1d3a7cd1.asciidoc000066400000000000000000000005641506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authorization/mapping-roles.asciidoc:138 [source, python] ---- resp = client.security.put_role_mapping( name="admins", roles=[ "monitoring", "user" ], rules={ "field": { "groups": "cn=admins,dc=example,dc=com" } }, enabled=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f268416813befd13c604642c6fe6eda9.asciidoc000066400000000000000000000013241506101734100265060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/lowercase-tokenfilter.asciidoc:131 [source, python] ---- resp = client.indices.create( index="custom_lowercase_example", settings={ "analysis": { "analyzer": { "greek_lowercase_example": { "type": "custom", "tokenizer": "standard", "filter": [ "greek_lowercase" ] } }, "filter": { "greek_lowercase": { "type": "lowercase", "language": "greek" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f27c28ddbf4c266b5f42d14da837b8de.asciidoc000066400000000000000000000002171506101734100267250ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/flush.asciidoc:147 [source, python] ---- resp = client.indices.flush() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f281ff50b2cdb67ac0ece93f1594fa95.asciidoc000066400000000000000000000017051506101734100267300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-shape-query.asciidoc:111 [source, python] ---- resp = client.search( index="example_points", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_shape": { "location": { "shape": { "type": "envelope", "coordinates": [ [ 13, 53 ], [ 14, 52 ] ] }, "relation": "intersects" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f298c4eb50ea97b34c57f8756eb350d3.asciidoc000066400000000000000000000002431506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/pending_tasks.asciidoc:57 [source, python] ---- resp = client.cat.pending_tasks( v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f29a28fffa7ec604a33a838f48f7ea79.asciidoc000066400000000000000000000015671506101734100266770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query_filter_context.asciidoc:81 [source, python] ---- resp = client.search( query={ "bool": { "must": [ { "match": { "title": "Search" } }, { "match": { "content": "Elasticsearch" } } ], "filter": [ { "term": { "status": "published" } }, { "range": { "publish_date": { "gte": "2015-01-01" } } } ] } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f29b2674299ddf51a25ed87619025ede.asciidoc000066400000000000000000000005251506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-search.asciidoc:122 [source, python] ---- resp = client.rollup.rollup_search( index="sensor_rollup", size=0, aggregations={ "max_temperature": { "max": { "field": "temperature" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2a5f77f929cc7b893b80f4bd5b1a192.asciidoc000066400000000000000000000003031506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/get-connector-api.asciidoc:74 [source, python] ---- resp = client.connector.get( connector_id="my-connector", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2b2d62bc0a44940ad14fca57d6d008a.asciidoc000066400000000000000000000052171506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/examples.asciidoc:215 [source, python] ---- resp = client.transform.put_transform( transform_id="suspicious_client_ips", source={ "index": "kibana_sample_data_logs" }, dest={ "index": "sample_weblogs_by_clientip" }, sync={ "time": { "field": "timestamp", "delay": "60s" } }, pivot={ "group_by": { "clientip": { "terms": { "field": "clientip" } } }, "aggregations": { "url_dc": { "cardinality": { "field": "url.keyword" } }, "bytes_sum": { "sum": { "field": "bytes" } }, "geo.src_dc": { "cardinality": { "field": "geo.src" } }, "agent_dc": { "cardinality": { "field": "agent.keyword" } }, "geo.dest_dc": { "cardinality": { "field": "geo.dest" } }, "responses.total": { "value_count": { "field": "timestamp" } }, "success": { "filter": { "term": { "response": "200" } } }, "error404": { "filter": { "term": { "response": "404" } } }, "error5xx": { "filter": { "range": { "response": { "gte": 500, "lt": 600 } } } }, "timestamp.min": { "min": { "field": "timestamp" } }, "timestamp.max": { "max": { "field": "timestamp" } }, "timestamp.duration_ms": { "bucket_script": { "buckets_path": { "min_time": "timestamp.min.value", "max_time": "timestamp.max.value" }, "script": "(params.max_time - params.min_time)" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2c9afd052878b2ec00908739b0d0f74.asciidoc000066400000000000000000000025271506101734100264300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:697 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "rename": { "description": "Rename 'provider' to 'cloud.provider'", "field": "provider", "target_field": "cloud.provider", "on_failure": [ { "set": { "description": "Set 'error.message'", "field": "error.message", "value": "Field 'provider' does not exist. 
Cannot rename to 'cloud.provider'", "override": False, "on_failure": [ { "set": { "description": "Set 'error.message.multi'", "field": "error.message.multi", "value": "Document encountered multiple ingest errors", "override": True } } ] } } ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2e854b6c99659ccc1824e86c096e433.asciidoc000066400000000000000000000003451506101734100263730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/auto-follow/resume-auto-follow-pattern.asciidoc:86 [source, python] ---- resp = client.ccr.resume_auto_follow_pattern( name="my_auto_follow_pattern", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2ec53c0ef5025de8890d0ff8ec287a0.asciidoc000066400000000000000000000010101506101734100266340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rank-eval.asciidoc:359 [source, python] ---- resp = client.rank_eval( index="my-index-000001", requests=[ { "id": "JFK query", "request": { "query": { "match_all": {} } }, "ratings": [] } ], metric={ "mean_reciprocal_rank": { "k": 20, "relevant_rating_threshold": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f2f1cae094855a45fd8f73478bec8e70.asciidoc000066400000000000000000000005001506101734100265750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/split-index.asciidoc:209 [source, python] ---- resp = client.indices.split( index="my_source_index", target="my_target_index", settings={ "index.number_of_shards": 5 }, aliases={ "my_search_indices": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f321d4e92aa83d573ecf52bf56b0b774.asciidoc000066400000000000000000000005241506101734100265610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:377 [source, python] ---- resp = client.perform_request( "POST", "/_connector/_sync_job", headers={"Content-Type": "application/json"}, body={ "id": "my-connector-id", "job_type": "full" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f329242d7c8406297eff9bf609870c37.asciidoc000066400000000000000000000006701506101734100263140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/suggesters/completion-suggest.asciidoc:304 [source, python] ---- resp = client.search( index="music", pretty=True, suggest={ "song-suggest": { "prefix": "nor", "completion": { "field": "suggest", "fuzzy": { "fuzziness": 2 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f32f0c19b42de3b87dd764fe4ca17e7c.asciidoc000066400000000000000000000005161506101734100267260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/query-string-query.asciidoc:420 [source, python] ---- resp = client.search( query={ "query_string": { "default_field": "title", "query": "ny city", "auto_generate_synonyms_phrase_query": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f342465c65ba76383dedbb334b57b616.asciidoc000066400000000000000000000013121506101734100264150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/index-options.asciidoc:32 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "text": { "type": "text", "index_options": "offsets" } } }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "text": "Quick brown fox" }, ) print(resp1) resp2 = client.search( index="my-index-000001", 
query={ "match": { "text": "brown fox" } }, highlight={ "fields": { "text": {} } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/f34c02351662481dd61a5c2a3e206c60.asciidoc000066400000000000000000000010131506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/hyphenation-decompounder-tokenfilter.asciidoc:25 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ { "type": "hyphenation_decompounder", "hyphenation_patterns_path": "analysis/hyphenation_patterns.xml", "word_list": [ "Kaffee", "zucker", "tasse" ] } ], text="Kaffeetasse", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3594de7ef39ab09b0bb12c1e76bfe6b.asciidoc000066400000000000000000000005041506101734100267770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/shrink-index.asciidoc:125 [source, python] ---- resp = client.indices.shrink( index="my_source_index", target="my_target_index", settings={ "index.routing.allocation.require._name": None, "index.blocks.write": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3697682a886ab129530f3e5c1b30632.asciidoc000066400000000000000000000002711506101734100262020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/termvectors.asciidoc:16 [source, python] ---- resp = client.termvectors( index="my-index-000001", id="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f37173a75cd1b0d683c6f67819dd1de3.asciidoc000066400000000000000000000002621506101734100265100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:800 [source, python] ---- resp = client.get( index="my-new-index-000001", id="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f388e571224dd6850f8c9f9f08fca3da.asciidoc000066400000000000000000000003151506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/invalidate-api-keys.asciidoc:129 [source, python] ---- resp = client.security.invalidate_api_key( name="my-api-key", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3942d9b34138dfca79dff707af270b7.asciidoc000066400000000000000000000004731506101734100266010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:1169 [source, python] ---- resp = client.eql.search( index="my-data-stream", timestamp_field="file.accessed", event_category_field="file.type", query="\n file where (file.size > 1 and file.type == \"file\")\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f39512478cae2db8f4566a1e4af9e8f5.asciidoc000066400000000000000000000017421506101734100266060ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/rollup-getting-started.asciidoc:217 [source, python] ---- resp = client.rollup.rollup_search( index="sensor_rollup", size=0, aggregations={ "timeline": { "date_histogram": { "field": "timestamp", "fixed_interval": "7d" }, "aggs": { "nodes": { "terms": { "field": "node" }, "aggs": { "max_temperature": { "max": { "field": "temperature" } }, "avg_voltage": { "avg": { "field": "voltage" } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3ab820e1f2f54ea718017aeae865742.asciidoc000066400000000000000000000010361506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/oidc-guide.asciidoc:470 [source, python] ---- resp = client.security.put_role_mapping( 
name="oidc-finance", roles=[ "finance_data" ], enabled=True, rules={ "all": [ { "field": { "realm.name": "oidc1" } }, { "field": { "groups": "finance-team" } } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3b185131f40687c25d2f85e1231d8bd.asciidoc000066400000000000000000000003201506101734100263270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/validate.asciidoc:105 [source, python] ---- resp = client.indices.validate_query( index="my-index-000001", q="user.id:kimchy", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3b4ddce8ff21fc1a76a7c0d9c36650e.asciidoc000066400000000000000000000006331506101734100270020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-shrink.asciidoc:65 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "shrink": { "number_of_shards": 1 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3c696cd63a3f042e62cbb94b75c2427.asciidoc000066400000000000000000000003421506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // upgrade/archived-settings.asciidoc:24 [source, python] ---- resp = client.cluster.get_settings( flat_settings=True, filter_path="persistent.archived*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3e1dfe1c440e3590be26f265e19425d.asciidoc000066400000000000000000000014271506101734100265020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:235 [source, python] ---- resp = client.search( index="my-index-000001", query={ "script_score": { "query": { "bool": { "filter": { "term": { "status": "published" } } } }, "script": { "source": "1 / (1 + l2norm(params.queryVector, 'my_dense_vector'))", "params": { "queryVector": [ 4, 3.4, -0.2 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3fb3cba44988b6e9fee93316138b2cf.asciidoc000066400000000000000000000003431506101734100266560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-privileges-cache.asciidoc:56 [source, python] ---- resp = client.security.clear_cached_privileges( application="myapp,my-other-app", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3fb52680482925c202c2e2f8af6f044.asciidoc000066400000000000000000000002771506101734100263440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:459 [source, python] ---- resp = client.cat.count( index="my-index-000001", v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f3fe2012557ebbce1ebad4fc997c092d.asciidoc000066400000000000000000000005021506101734100267730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/register-fs-repo.asciidoc:32 [source, python] ---- resp = client.snapshot.create_repository( name="my_fs_backup", repository={ "type": "fs", "settings": { "location": "my_fs_backup_location" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f43d551aaaad73d979adf1b86533e6a3.asciidoc000066400000000000000000000005711506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:216 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "date_histogram": { "field": "date", "fixed_interval": "2w" } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f43ec4041e3b72bbde063452990bfc4b.asciidoc000066400000000000000000000003101506101734100265350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/clearcache.asciidoc:148 [source, python] ---- resp = client.indices.clear_cache( index="my-index-000001,my-index-000002", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f44d287c6937785eb09b91353c1deb1e.asciidoc000066400000000000000000000003501506101734100264330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-datafeed-stats.asciidoc:183 [source, python] ---- resp = client.ml.get_datafeed_stats( datafeed_id="datafeed-high_sum_total_sales", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f453e14bcf30853e57618bf12f83e148.asciidoc000066400000000000000000000012311506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/pattern-analyzer.asciidoc:385 [source, python] ---- resp = client.indices.create( index="pattern_example", settings={ "analysis": { "tokenizer": { "split_on_non_word": { "type": "pattern", "pattern": "\\W+" } }, "analyzer": { "rebuilt_pattern": { "tokenizer": "split_on_non_word", "filter": [ "lowercase" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f454e3f8ad5f5bd82a4a25af7dee9ca1.asciidoc000066400000000000000000000020171506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/array.asciidoc:39 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "message": "some arrays in this document...", "tags": [ "elasticsearch", "wow" ], "lists": [ { "name": "prog_list", "description": "programming list" }, { "name": "cool_list", "description": "cool stuff list" } ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="2", document={ "message": "no arrays in this document...", "tags": "elasticsearch", "lists": { "name": "prog_list", "description": "programming list" } }, ) print(resp1) resp2 = client.search( index="my-index-000001", query={ "match": { "tags": "elasticsearch" } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/f45990264f8755b96b11c69c12c90ff4.asciidoc000066400000000000000000000003071506101734100263030ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/troubleshooting-searches.asciidoc:21 [source, python] ---- resp = client.indices.exists( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f495f9c99916a05e3b28166d31955fad.asciidoc000066400000000000000000000010641506101734100263700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/terms-aggregation.asciidoc:292 [source, python] ---- resp = client.search( aggs={ "genres": { "terms": { "field": "genre", "order": { "playback_stats.max": "desc" } }, "aggs": { "playback_stats": { "stats": { "field": "play_count" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f49ac80f0130cae8d0ea6f4472a149dd.asciidoc000066400000000000000000000010611506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/knn-query.asciidoc:18 [source, python] ---- resp = client.indices.create( index="my-image-index", mappings={ "properties": { "image-vector": { "type": "dense_vector", "dims": 3, "index": True, "similarity": "l2_norm" }, "file-type": { "type": "keyword" }, "title": { "type": "text" } } }, 
) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f4ae3f3fbf07a7d39122ac5ac20b9c03.asciidoc000066400000000000000000000011321506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/knn-search.asciidoc:280 [source, python] ---- resp = client.indices.create( index="quantized-image-index", mappings={ "properties": { "image-vector": { "type": "dense_vector", "element_type": "float", "dims": 2, "index": True, "index_options": { "type": "int8_hnsw" } }, "title": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f4b9baed3c6a82be3672cbc8999c2368.asciidoc000066400000000000000000000003201506101734100266530ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/terms-enum.asciidoc:19 [source, python] ---- resp = client.terms_enum( index="stackoverflow", field="tags", string="kiba", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f4c194628761a4cf2a01453a96bbcc3c.asciidoc000066400000000000000000000044001506101734100264630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:344 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "multipolygon", "coordinates": [ [ [ [ 1002, 200 ], [ 1003, 200 ], [ 1003, 300 ], [ 1002, 300 ], [ 1002, 200 ] ] ], [ [ [ 1000, 200 ], [ 1001, 100 ], [ 1001, 100 ], [ 1000, 100 ], [ 1000, 100 ] ], [ [ 1000.2, 200.2 ], [ 1000.8, 100.2 ], [ 1000.8, 100.8 ], [ 1000.2, 100.8 ], [ 1000.2, 100.2 ] ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f4dc1286d0a2f8d1fde64fbf12fd9f8d.asciidoc000066400000000000000000000014551506101734100270760ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // troubleshooting/common-issues/disk-usage-exceeded.asciidoc:90 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster.routing.allocation.disk.watermark.low": None, "cluster.routing.allocation.disk.watermark.low.max_headroom": None, "cluster.routing.allocation.disk.watermark.high": None, "cluster.routing.allocation.disk.watermark.high.max_headroom": None, "cluster.routing.allocation.disk.watermark.flood_stage": None, "cluster.routing.allocation.disk.watermark.flood_stage.max_headroom": None, "cluster.routing.allocation.disk.watermark.flood_stage.frozen": None, "cluster.routing.allocation.disk.watermark.flood_stage.frozen.max_headroom": None }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f4f557716049b23f8840d58d71e748f0.asciidoc000066400000000000000000000004261506101734100262300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/update-settings.asciidoc:121 [source, python] ---- resp = client.indices.put_settings( index="my-index-000001", settings={ "index": { "refresh_interval": "-1" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f4fdfe52ecba65eec6beb30d8deb8bbf.asciidoc000066400000000000000000000006001506101734100274360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/post-forget-follower.asciidoc:41 [source, python] ---- resp = client.ccr.forget_follower( index="", follower_cluster="", follower_index="", follower_index_uuid="", leader_remote_cluster="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5013174f77868da4dc40cdd745d4ea4.asciidoc000066400000000000000000000005051506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
aggregations/bucket/rare-terms-aggregation.asciidoc:130 [source, python] ---- resp = client.search( aggs={ "genres": { "rare_terms": { "field": "genre", "max_doc_count": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5140f08f56c64b5789357539f8b9ba8.asciidoc000066400000000000000000000003161506101734100263150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/delete-alias.asciidoc:16 [source, python] ---- resp = client.indices.delete_alias( index="my-data-stream", name="my-alias", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f545bb95214769aca993c1632a71ad2c.asciidoc000066400000000000000000000033051506101734100264160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:785 [source, python] ---- resp = client.indices.create( index="french_example", settings={ "analysis": { "filter": { "french_elision": { "type": "elision", "articles_case": True, "articles": [ "l", "m", "t", "qu", "n", "s", "j", "d", "c", "jusqu", "quoiqu", "lorsqu", "puisqu" ] }, "french_stop": { "type": "stop", "stopwords": "_french_" }, "french_keywords": { "type": "keyword_marker", "keywords": [ "Example" ] }, "french_stemmer": { "type": "stemmer", "language": "light_french" } }, "analyzer": { "rebuilt_french": { "tokenizer": "standard", "filter": [ "french_elision", "lowercase", "french_stop", "french_keywords", "french_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f54f6d06163221f2c7aff6e8db942be3.asciidoc000066400000000000000000000007401506101734100265640ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/take-snapshot.asciidoc:579 [source, python] ---- resp = client.slm.put_lifecycle( policy_id="daily-snapshots", name="", schedule="0 45 23 * * ?", repository="my_repository", config={ "indices": "*", "include_global_state": True }, retention={ "expire_after": "30d", "min_count": 1, "max_count": 31 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f57ce7de0946e9416ddb9150e95f4b74.asciidoc000066400000000000000000000007751506101734100265400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-azure-openai.asciidoc:165 [source, python] ---- resp = client.inference.put( task_type="completion", inference_id="azure_openai_completion", inference_config={ "service": "azureopenai", "service_settings": { "api_key": "", "resource_name": "", "deployment_id": "", "api_version": "2024-02-01" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5815d573cee0447910c9668003887b8.asciidoc000066400000000000000000000005741506101734100261520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/datehistogram-aggregation.asciidoc:122 [source, python] ---- resp = client.search( index="sales", size="0", aggs={ "sales_over_time": { "date_histogram": { "field": "date", "calendar_interval": "2d" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f58969ac405db85f439c5940d014964b.asciidoc000066400000000000000000000010101506101734100262720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-bounding-box-query.asciidoc:271 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_bounding_box": { "pin.location": { "wkt": "BBOX (-74.1, -71.12, 40.73, 40.01)" } } } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f58fd031597e2c3df78bf0efd07206e3.asciidoc000066400000000000000000000002701506101734100265670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/start-basic.asciidoc:68 [source, python] ---- resp = client.license.post_start_basic( acknowledge=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5bf2526af19d964f8c4c59d4795cffc.asciidoc000066400000000000000000000013001506101734100266670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/mlt-query.asciidoc:121 [source, python] ---- resp = client.indices.create( index="imdb", mappings={ "properties": { "title": { "type": "text", "term_vector": "yes" }, "description": { "type": "text" }, "tags": { "type": "text", "fields": { "raw": { "type": "text", "analyzer": "keyword", "term_vector": "yes" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5cbbb60ca26867a5d2da625a68a6e65.asciidoc000066400000000000000000000016131506101734100266360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // transform/ecommerce-tutorial.asciidoc:337 [source, python] ---- resp = client.indices.create( index="ecommerce-customers", mappings={ "properties": { "total_quantity.sum": { "type": "double" }, "total_quantity": { "type": "object" }, "taxless_total_price": { "type": "object" }, "taxless_total_price.sum": { "type": "double" }, "order_id.cardinality": { "type": "long" }, "customer_id": { "type": "keyword" }, "total_quantity.max": { "type": "integer" }, "order_id": { "type": "object" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5e50fe8a60467adb2c5ee9e0f2d88da.asciidoc000066400000000000000000000004351506101734100270120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:348 [source, python] ---- resp = client.sql.clear_cursor( cursor="sDXF1ZXJ5QW5kRmV0Y2gBAAAAAAAAAAEWYUpOYklQMHhRUEtld3RsNnFtYU1hQQ==:BAFmBGRhdGUBZgVsaWtlcwFzB21lc3NhZ2UBZgR1c2Vy9f///w8=", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5e6378cc41ddf5326fe4084396c59b2.asciidoc000066400000000000000000000007141506101734100264430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/specify-analyzer.asciidoc:186 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "default": { "type": "simple" }, "default_search": { "type": "whitespace" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5eed3f2e3558a238487bc85305b7a71.asciidoc000066400000000000000000000004251506101734100264320ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:241 [source, python] ---- resp = client.index( index="example", document={ "location": "POLYGON ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0))" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f5ef80dd92c67059ca353a833e6b7b5e.asciidoc000066400000000000000000000007461506101734100266020ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/sum-aggregation.asciidoc:14 [source, python] ---- resp = client.search( index="sales", size="0", query={ "constant_score": { "filter": { "match": { "type": "hat" } } } }, aggs={ "hat_prices": { "sum": { "field": "price" } } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f625fdbbe78c4198d9e40b35f3f008b3.asciidoc000066400000000000000000000004121506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-known-issues.asciidoc:99 [source, python] ---- resp = client.update( index=".elastic-connectors", id="connector-id", doc={ "custom_scheduling": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f63f6343e74bd5c844854272e746de14.asciidoc000066400000000000000000000003071506101734100263010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/deactivate-watch.asciidoc:88 [source, python] ---- resp = client.watcher.deactivate_watch( watch_id="my_watch", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f642b64e592131f37209a5100fe161cc.asciidoc000066400000000000000000000017441506101734100262470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/dynamic/templates.asciidoc:425 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "dynamic_templates": [ { "named_analyzers": { "match_mapping_type": "string", "match": "*", "mapping": { "type": "text", "analyzer": "{name}" } } }, { "no_doc_values": { "match_mapping_type": "*", "mapping": { "type": "{dynamic_type}", "doc_values": False } } } ] }, ) print(resp) resp1 = client.index( index="my-index-000001", id="1", document={ "english": "Some English text", "count": 5 }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/f6566395f85d3afe917228643d7318d6.asciidoc000066400000000000000000000002701506101734100262340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:469 [source, python] ---- resp = client.indices.delete( index="my-index-000001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f656c1e64268293ecc8ebd8065628faa.asciidoc000066400000000000000000000004101506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-service-token-caches.asciidoc:76 [source, python] ---- resp = client.security.clear_cached_service_tokens( namespace="elastic", service="fleet-server", name="*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f65abb38dd0cfedeb06e0cef206fbdab.asciidoc000066400000000000000000000003771506101734100273470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/ngram-tokenfilter.asciidoc:30 [source, python] ---- resp = client.indices.analyze( tokenizer="standard", filter=[ "ngram" ], text="Quick fox", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f66643c54999426c5afa6d5a87435d4e.asciidoc000066400000000000000000000003261506101734100263710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-api-key-cache.asciidoc:49 [source, python] ---- resp = client.security.clear_api_key_cache( ids="yVGMr3QByxdh1MSaicYx", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f679e414de48b8fe25e458844be05618.asciidoc000066400000000000000000000004361506101734100263750ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/connectors-API-tutorial.asciidoc:179 [source, python] ---- resp = client.connector.put( connector_id="my-connector-id", name="Music catalog", index_name="music", service_type="postgresql", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f67d8aab9106ad24b1d2c771d3840ed1.asciidoc000066400000000000000000000034071506101734100265500ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // watcher/actions.asciidoc:276 [source, python] ---- resp = client.watcher.put_watch( id="log_event_watch", trigger={ "schedule": { "interval": "5m" } }, input={ "search": { "request": { "indices": "log-events", "body": { "size": 0, "query": { "match": { "status": "error" } } } } } }, condition={ "compare": { "ctx.payload.hits.total": { "gt": 0 } } }, actions={ "email_administrator": { "email": { "to": "sys.admino@host.domain", "subject": "Encountered {{ctx.payload.hits.total}} errors", "body": "Too many errors in the system, see attached data", "attachments": { "attached_data": { "data": { "format": "json" } } }, "priority": "high" } }, "notify_pager": { "condition": { "compare": { "ctx.payload.hits.total": { "gt": 5 } } }, "webhook": { "method": "POST", "host": "pager.service.domain", "port": 1234, "path": "/{{watch_id}}", "body": "Encountered {{ctx.payload.hits.total}} errors" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6911b0f2f56523ccbd8027f276981b3.asciidoc000066400000000000000000000006161506101734100263460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/combined-fields-query.asciidoc:15 [source, python] ---- resp = client.search( query={ "combined_fields": { "query": "database systems", "fields": [ "title", "abstract", "body" ], "operator": "and" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6982ff80b9a64cd5fcac5b20908c906.asciidoc000066400000000000000000000004041506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/delete-calendar-event.asciidoc:49 [source, python] ---- resp = client.ml.delete_calendar_event( calendar_id="planned-outages", event_id="LS8LJGEBMTCMA-qz49st", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6c9d72fa26cbedd0c3f9fa64a88c38a.asciidoc000066400000000000000000000003321506101734100270700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/alias.asciidoc:86 [source, python] ---- resp = client.search( query={ "match_all": {} }, source="route_length_miles", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6d493650b4344f17297b568016fb445.asciidoc000066400000000000000000000002741506101734100261360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/apis/follow/post-unfollow.asciidoc:39 [source, python] ---- resp = client.ccr.unfollow( index="<follower_index>", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6de702c3d097af0b0bd391c4f947233.asciidoc000066400000000000000000000004511506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/disk/decrease-data-node-disk-usage.asciidoc:103 [source, python] ---- resp = client.cat.indices( v=True, s="rep:desc,pri.store.size:desc", h="health,index,pri,rep,store.size,pri.store.size", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6df4acf3c7a4f85706ff314b21ebcb2.asciidoc000066400000000000000000000003261506101734100267740ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/clear-privileges-cache.asciidoc:49 [source, python] ---- resp = client.security.clear_cached_privileges( application="myapp", ) print(resp) ----
python-elasticsearch-9.1.1/docs/examples/f6ead39c5505045543b9225deca7367d.asciidoc000066400000000000000000000003321506101734100264210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cluster/voting-exclusions.asciidoc:115 [source, python] ---- resp = client.cluster.post_voting_config_exclusions( node_names="nodeName1,nodeName2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6edbed2b5b2709bbc13866a4780e27a.asciidoc000066400000000000000000000006251506101734100266400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/params/dynamic.asciidoc:9 [source, python] ---- resp = client.index( index="my-index-000001", id="1", document={ "username": "johnsmith", "name": { "first": "John", "last": "Smith" } }, ) print(resp) resp1 = client.indices.get_mapping( index="my-index-000001", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/f6eff830fb0fad200ebfb1e3e46f6f0e.asciidoc000066400000000000000000000007011506101734100271270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/execute-watch.asciidoc:161 [source, python] ---- resp = client.watcher.execute_watch( id="my_watch", trigger_data={ "triggered_time": "now", "scheduled_time": "now" }, alternative_input={ "foo": "bar" }, ignore_condition=True, action_modes={ "my-action": "force_simulate" }, record_execution=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f6f647eb644a2d236637ff05f833cb73.asciidoc000066400000000000000000000005021506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/_connectors-create-native-api-key.asciidoc:43 [source, python] ---- resp = client.perform_request( "POST", "/_connector/_secret", headers={"Content-Type": "application/json"}, body={ "value": "encoded_api_key" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f70a54cd9a9f4811bf962e469f2ca2ea.asciidoc000066400000000000000000000004671506101734100266570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/bool-query.asciidoc:91 [source, python] ---- resp = client.search( query={ "bool": { "filter": { "term": { "status": "active" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f70ff57c80cdbce3f1e7c63ee307c92d.asciidoc000066400000000000000000000004471506101734100270140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:508 [source, python] ---- resp = client.reindex( source={ "index": "my_test_scores" }, dest={ "index": "my_test_scores_2", "pipeline": "my_test_scores_pipeline" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f7139b3c0e066be832b9100ae17157cc.asciidoc000066400000000000000000000004411506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // esql/esql-rest.asciidoc:50 [source, python] ---- resp = client.esql.query( format="txt", query="\n FROM library\n | KEEP author, name, page_count, release_date\n | SORT page_count DESC\n | LIMIT 5\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f733b25cd4c448b226bb76862974eef2.asciidoc000066400000000000000000000014771506101734100264440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc:51 [source, python] ---- resp = client.indices.create( index="test", settings={ "analysis": { "filter": { "code": { "type": "pattern_capture", "preserve_original": True, "patterns": [ 
"(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)", "(\\d+)" ] } }, "analyzer": { "code": { "tokenizer": "pattern", "filter": [ "code", "lowercase" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f749efe8f11ebd43ef83db91922c736e.asciidoc000066400000000000000000000011571506101734100266710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ccr/uni-directional-disaster-recovery.asciidoc:133 [source, python] ---- resp = client.cluster.put_settings( persistent={ "cluster": { "remote": { "clusterB": { "mode": "proxy", "skip_unavailable": "true", "server_name": "clusterb.es.region-b.gcp.elastic-cloud.com", "proxy_socket_connections": "18", "proxy_address": "clusterb.es.region-b.gcp.elastic-cloud.com:9400" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f7726cc2c60dea26b88bf0df99fb0813.asciidoc000066400000000000000000000004621506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:197 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "runtime": { "day_of_week": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f785b5d17eb59f8d2a353c2dee66eb5b.asciidoc000066400000000000000000000003531506101734100267360ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/get-connector-sync-job-api.asciidoc:51 [source, python] ---- resp = client.perform_request( "GET", "/_connector/_sync_job/my-connector-sync-job", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f7b20e4bb8366f6d2e4486f3bf4211bc.asciidoc000066400000000000000000000012711506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/histogram-aggregation.asciidoc:201 [source, python] ---- resp = client.search( index="sales", size="0", query={ "constant_score": { "filter": { "range": { "price": { "lte": "500" } } } } }, aggs={ "prices": { "histogram": { "field": "price", "interval": 50, "hard_bounds": { "min": 100, "max": 200 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f7d3d367a3d8e8ff0eca426b6ea85252.asciidoc000066400000000000000000000004071506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/tsds-reindex.asciidoc:222 [source, python] ---- resp = client.reindex( source={ "index": "k8s" }, dest={ "index": "k9s", "op_type": "create" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f7dc2fed08e57abda2c3e8a14f8eb098.asciidoc000066400000000000000000000021411506101734100270630ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/lang-analyzer.asciidoc:136 [source, python] ---- resp = client.indices.create( index="armenian_example", settings={ "analysis": { "filter": { "armenian_stop": { "type": "stop", "stopwords": "_armenian_" }, "armenian_keywords": { "type": "keyword_marker", "keywords": [ "օրինակ" ] }, "armenian_stemmer": { "type": "stemmer", "language": "armenian" } }, "analyzer": { "rebuilt_armenian": { "tokenizer": "standard", "filter": [ "lowercase", "armenian_stop", "armenian_keywords", "armenian_stemmer" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f7ec9062b3a7578fed55f119d7c22b74.asciidoc000066400000000000000000000004141506101734100265150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/testing.asciidoc:62 [source, python] ---- resp = client.indices.analyze( 
tokenizer="standard", filter=[ "lowercase", "asciifolding" ], text="Is this déja vu?", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f823e4b87ed181b27f73ebc51351f0ee.asciidoc000066400000000000000000000003041506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/delete-data-stream.asciidoc:32 [source, python] ---- resp = client.indices.delete_data_stream( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f83eb6605c7c56e297a494b318400ef0.asciidoc000066400000000000000000000010221506101734100263400ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/filter-search-results.asciidoc:58 [source, python] ---- resp = client.search( index="shirts", query={ "bool": { "filter": [ { "term": { "color": "red" } }, { "term": { "brand": "gucci" } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f86337e13526c968848cfe29a52d658f.asciidoc000066400000000000000000000007761506101734100263330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/inference-api/infer-api-ingest-pipeline.asciidoc:41 [source, python] ---- resp = client.ingest.put_pipeline( id="elser_embeddings_pipeline", processors=[ { "inference": { "model_id": "elser_embeddings", "input_output": { "input_field": "content", "output_field": "content_embedding" } } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f8651356ce2e7e93fa306c30f57ed588.asciidoc000066400000000000000000000007571506101734100264540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/truncate-tokenfilter.asciidoc:93 [source, python] ---- resp = client.indices.create( index="custom_truncate_example", settings={ "analysis": { "analyzer": { "standard_truncate": { "tokenizer": "standard", "filter": [ "truncate" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f8833488041f3d318435b60917fa877c.asciidoc000066400000000000000000000015071506101734100261440ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-overview.asciidoc:98 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "my_search_index1", "my_search_index2" ], "template": { "script": { "source": { "query": { "query_string": { "query": "{{query_string}}", "default_field": "{{default_field}}" } } }, "params": { "query_string": "*", "default_field": "*" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f8a0010753b1ff563dc42d703902d2fa.asciidoc000066400000000000000000000017511506101734100264010ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/bool-query.asciidoc:39 [source, python] ---- resp = client.search( query={ "bool": { "must": { "term": { "user.id": "kimchy" } }, "filter": { "term": { "tags": "production" } }, "must_not": { "range": { "age": { "gte": 10, "lte": 20 } } }, "should": [ { "term": { "tags": "env1" } }, { "term": { "tags": "deployed" } } ], "minimum_should_match": 1, "boost": 1 } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f8cafb1a08bc9b2dd5239f99d4e93f4c.asciidoc000066400000000000000000000005451506101734100270210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenizers/chargroup-tokenizer.asciidoc:33 [source, python] ---- resp = client.indices.analyze( tokenizer={ "type": "char_group", "tokenize_on_chars": 
[ "whitespace", "-", "\n" ] }, text="The QUICK brown-fox", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f8cb1a04c2e487ff006b5ae0e1a7afbd.asciidoc000066400000000000000000000002661506101734100270420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/apis/rollup-caps.asciidoc:181 [source, python] ---- resp = client.rollup.get_rollup_caps( id="sensor-1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f8f960550104c33e00dc78bc8723ccef.asciidoc000066400000000000000000000003041506101734100264730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // quickstart/full-text-filtering-tutorial.asciidoc:42 [source, python] ---- resp = client.indices.create( index="cooking_blog", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f92d2f5018a8843ffbb56ade15f84406.asciidoc000066400000000000000000000002461506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // licensing/get-basic-status.asciidoc:41 [source, python] ---- resp = client.license.get_basic_status() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f95a4d7ab02bf400246c8822f0245f02.asciidoc000066400000000000000000000002731506101734100263210ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // cat/trainedmodel.asciidoc:124 [source, python] ---- resp = client.cat.ml_trained_models( h="c,o,l,ct,v", v=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f96d4614f2fc294339fef325b794355f.asciidoc000066400000000000000000000003701506101734100263710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/get-bucket.asciidoc:208 [source, python] ---- resp = client.ml.get_buckets( job_id="low_request_rate", anomaly_score=80, start="1454530200001", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f96d8131e8a592fbf6dfd686173940a9.asciidoc000066400000000000000000000010231506101734100264460ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/watcher/update-settings.asciidoc:22 [source, python] ---- resp = client.watcher.put_watch( id="test_watch", trigger={ "schedule": { "hourly": { "minute": [ 0, 5 ] } } }, input={ "simple": { "payload": { "send": "yes" } } }, condition={ "always": {} }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f9732ce07960134ea7156e118c2da8a6.asciidoc000066400000000000000000000006611506101734100263430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/analyzers/simple-analyzer.asciidoc:134 [source, python] ---- resp = client.indices.create( index="my-index-000001", settings={ "analysis": { "analyzer": { "my_custom_simple_analyzer": { "tokenizer": "lowercase", "filter": [] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f978088f5117d4addd55c11ee3777312.asciidoc000066400000000000000000000004031506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/get-service-credentials.asciidoc:56 [source, python] ---- resp = client.security.create_service_token( namespace="elastic", service="fleet-server", name="token1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f97aa2efabbf11a534073041eb2658c9.asciidoc000066400000000000000000000003041506101734100265410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/apis/delete-stored-script-api.asciidoc:30 [source, python] ---- resp = client.delete_script( id="my-stored-script", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/f98687271e1bec031cc34d05d8f4b60b.asciidoc000066400000000000000000000005771506101734100265070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/span-multi-term-query.asciidoc:12 [source, python] ---- resp = client.search( query={ "span_multi": { "match": { "prefix": { "user.id": { "value": "ki" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f994498dd6576be657dedce2822d2b9e.asciidoc000066400000000000000000000021301506101734100266150ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-text-hybrid-search:119 [source, python] ---- resp = client.search( index="semantic-embeddings", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "match": { "content": "How to avoid muscle soreness while running?" } } } }, { "standard": { "query": { "semantic": { "field": "semantic_text", "query": "How to avoid muscle soreness while running?" } } } } ] } }, highlight={ "fields": { "semantic_text": { "number_of_fragments": 2 } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f9a315ea99bed0cf2f36be1d74eb3e4a.asciidoc000066400000000000000000000006201506101734100270550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/geo-shape.asciidoc:407 [source, python] ---- resp = client.index( index="example", document={ "location": "MULTIPOLYGON (((102.0 2.0, 103.0 2.0, 103.0 3.0, 102.0 3.0, 102.0 2.0)), ((100.0 0.0, 101.0 0.0, 101.0 1.0, 100.0 1.0, 100.0 0.0), (100.2 0.2, 100.8 0.2, 100.8 0.8, 100.2 0.8, 100.2 0.2)))" }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f9bad6fd369764185e1cb09b89ee39cc.asciidoc000066400000000000000000000013231506101734100266670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/text.asciidoc:237 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "text": { "type": "text", "store": True } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "text": [ "the quick brown fox", "the quick brown fox", "jumped over the lazy dog" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/f9c8245cc13770dff052b6759a749efa.asciidoc000066400000000000000000000002611506101734100265170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/get.asciidoc:294 [source, python] ---- resp = client.get_source( index="my-index-000001", id="1", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/f9f541ae23a184301913f07e62d1afd3.asciidoc000066400000000000000000000004551506101734100264100ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // sql/endpoints/rest.asciidoc:657 [source, python] ---- resp = client.sql.query( format="json", keep_alive="2d", wait_for_completion_timeout="2s", query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=5, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fa42ae3bf6a300420cd0f77ba006458a.asciidoc000066400000000000000000000003131506101734100265170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:17 [source, python] ---- resp = client.indices.analyze( analyzer="standard", text="Quick Brown Foxes!", ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/fa5dcd1c7fadc473a791daf0d7ceec36.asciidoc000066400000000000000000000010571506101734100272200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/metrics/geoline-aggregation.asciidoc:318 [source, python] ---- resp = client.search( index="tour", filter_path="aggregations", aggregations={ "path": { "time_series": {}, "aggregations": { "museum_tour": { "geo_line": { "point": { "field": "location" } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fa61e3481b1f889b3bd4253866bb1c6b.asciidoc000066400000000000000000000062661506101734100265120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/bucket-correlation-aggregation.asciidoc:103 [source, python] ---- resp = client.search( index="correlate_latency", size="0", filter_path="aggregations", aggs={ "buckets": { "terms": { "field": "version", "size": 2 }, "aggs": { "latency_ranges": { "range": { "field": "latency", "ranges": [ { "to": 0 }, { "from": 0, "to": 105 }, { "from": 105, "to": 225 }, { "from": 225, "to": 445 }, { "from": 445, "to": 665 }, { "from": 665, "to": 885 }, { "from": 885, "to": 1115 }, { "from": 1115, "to": 1335 }, { "from": 1335, "to": 1555 }, { "from": 1555, "to": 1775 }, { "from": 1775 } ] } }, "bucket_correlation": { "bucket_correlation": { "buckets_path": "latency_ranges>_count", "function": { "count_correlation": { "indicator": { "expectations": [ 0, 52.5, 165, 335, 555, 775, 1000, 1225, 1445, 1665, 1775 ], "doc_count": 200 } } } } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fa82d86a046d67366cfe9ce65535e433.asciidoc000066400000000000000000000010711506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // graph/explore.asciidoc:402 [source, python] ---- resp = client.graph.explore( index="clicklogs", vertices=[ { "field": "product", "include": [ "1854873" ] } ], connections={ "vertices": [ { "field": "query.raw", "exclude": [ "midi keyboard", "midi", "synth" ] } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fa88f6f5a7d728ec4f1d05244228cb09.asciidoc000066400000000000000000000005751506101734100265200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/bool-query.asciidoc:110 [source, python] ---- resp = client.search( query={ "bool": { "must": { "match_all": {} }, "filter": { "term": { "status": "active" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fa946228e946da256d40264c8b070a1a.asciidoc000066400000000000000000000005631506101734100263350ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations.asciidoc:241 [source, python] ---- resp = client.search( index="my-index-000001", aggs={ "my-agg-name": { "terms": { "field": "my-field" }, "meta": { "my-metadata-field": "foo" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fa9a3ef94470f3d9bd6500b65bf993d1.asciidoc000066400000000000000000000004051506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/multiplexer-tokenfilter.asciidoc:61 [source, python] ---- resp = client.indices.analyze( index="multiplexer_example", analyzer="my_analyzer", text="Going HOME", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fab4b811ba968aa4df92fb1ac059ea31.asciidoc000066400000000000000000000004641506101734100267650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
mapping/types/geo-shape.asciidoc:106 [source, python] ---- resp = client.indices.create( index="example", mappings={ "properties": { "location": { "type": "geo_shape" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fab702851e90e945c1b62dec0bb6a205.asciidoc000066400000000000000000000003741506101734100265430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // behavioral-analytics/apis/delete-analytics-collection.asciidoc:59 [source, python] ---- resp = client.search_application.delete_behavioral_analytics( name="my_analytics_collection", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fabe14480624a99e8ee42c7338672058.asciidoc000066400000000000000000000003121506101734100262710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/create-index.asciidoc:270 [source, python] ---- resp = client.indices.create( index="test", wait_for_active_shards="2", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fad26f4fb5a1bc9c38db33394e877d94.asciidoc000066400000000000000000000003331506101734100266550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/df-analytics/apis/get-dfanalytics-stats.asciidoc:539 [source, python] ---- resp = client.ml.get_data_frame_analytics_stats( id="weblog-outliers", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fad524db23eb5718ff310956e590b00d.asciidoc000066400000000000000000000004761506101734100264770ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/function-score-query.asciidoc:241 [source, python] ---- resp = client.search( query={ "function_score": { "random_score": { "seed": 10, "field": "_seq_no" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/faf7d8b9827cf5c0db5c177f01dc31c4.asciidoc000066400000000000000000000010441506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/rank-eval.asciidoc:263 [source, python] ---- resp = client.rank_eval( index="my-index-000001", requests=[ { "id": "JFK query", "request": { "query": { "match_all": {} } }, "ratings": [] } ], metric={ "precision": { "k": 20, "relevant_rating_threshold": 1, "ignore_unlabeled": False } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb0152f6c70f647a8b6709969113486d.asciidoc000066400000000000000000000012651506101734100262230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/keyword.asciidoc:222 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "kwd": { "type": "keyword", "store": True } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "kwd": [ "foo", "foo", "bar", "baz" ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/fb1180992b2087dfb36576b44c4261e4.asciidoc000066400000000000000000000007001506101734100262560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/change-mappings-and-settings.asciidoc:249 [source, python] ---- resp = client.indices.put_mapping( index="my-data-stream", write_index_only=True, properties={ "host": { "properties": { "ip": { "type": "ip", "ignore_malformed": True } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb1263cfdcbb6a89b20b57004d7e0dfc.asciidoc000066400000000000000000000012071506101734100267610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
ingest/processors/set.asciidoc:96 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "set": { "field": "my_field", "value": "{{{input_field.1}}}" } } ] }, docs=[ { "_index": "index", "_id": "id", "_source": { "input_field": [ "Ubuntu", "Windows", "Ventura" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb2b91206cfa8b86b4c7117ac1b5193b.asciidoc000066400000000000000000000016671506101734100265540ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/pipeline/cumulative-cardinality-aggregation.asciidoc:145 [source, python] ---- resp = client.search( index="user_hits", size=0, aggs={ "users_per_day": { "date_histogram": { "field": "timestamp", "calendar_interval": "day" }, "aggs": { "distinct_users": { "cardinality": { "field": "user_id" } }, "total_new_users": { "cumulative_cardinality": { "buckets_path": "distinct_users" } }, "incremental_new_users": { "derivative": { "buckets_path": "total_new_users" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb3505d976283fb7c7b9705a761e0dc2.asciidoc000066400000000000000000000015661506101734100264370ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:264 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "polygon", "orientation": "clockwise", "coordinates": [ [ [ 1000, 1000 ], [ 1000, 1001 ], [ 1001, 1001 ], [ 1001, 1000 ], [ 1000, 1000 ] ] ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb4799d2fe4011bf6084f89d97d9a4a5.asciidoc000066400000000000000000000003211506101734100265230ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // autoscaling/apis/get-autoscaling-policy.asciidoc:47 [source, python] ---- resp = client.autoscaling.get_autoscaling_policy( name="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb56c2ac77d4c308d7702b6b33698382.asciidoc000066400000000000000000000004451506101734100263510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/docs/_connectors-create-native-api-key.asciidoc:54 [source, python] ---- resp = client.connector.update_api_key_id( connector_id="my_connector_id>", api_key_id="API key_id", api_key_secret_id="secret_id", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fb955375a202f66133af009c04cb77ad.asciidoc000066400000000000000000000007171506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/range-enrich-policy-type-ex.asciidoc:17 [source, python] ---- resp = client.indices.create( index="networks", mappings={ "properties": { "range": { "type": "ip_range" }, "name": { "type": "keyword" }, "department": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fbb38243221c8fb311660616e3add9ce.asciidoc000066400000000000000000000011301506101734100264570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:420 [source, python] ---- resp = client.search( sort=[ { "_geo_distance": { "pin.location": [ -70, 40 ], "order": "asc", "unit": "km", "mode": "min", "distance_type": "arc", "ignore_unmapped": True } } ], query={ "term": { "user": "kimchy" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fbc5ab85b908480bf944b55da0a43488.asciidoc000066400000000000000000000004071506101734100265040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // 
query-dsl/prefix-query.asciidoc:16 [source, python] ---- resp = client.search( query={ "prefix": { "user.id": { "value": "ki" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fbdad6620eb645f5f1f02e3673604d01.asciidoc000066400000000000000000000007331506101734100264670ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/geo-distance-query.asciidoc:236 [source, python] ---- resp = client.search( index="my_locations", query={ "bool": { "must": { "match_all": {} }, "filter": { "geo_distance": { "distance": "12km", "pin.location": "drm3btev3e86" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc1907515f6a913884a9f86451e90ee8.asciidoc000066400000000000000000000010131506101734100263040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/semantic-search-elser.asciidoc:316 [source, python] ---- resp = client.indices.create( index="my-index", mappings={ "_source": { "excludes": [ "content_embedding" ] }, "properties": { "content_embedding": { "type": "sparse_vector" }, "content": { "type": "text" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc190fbbf71949331266dcb3f46a1198.asciidoc000066400000000000000000000003031506101734100264200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/data-stream-stats.asciidoc:57 [source, python] ---- resp = client.indices.data_streams_stats( name="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc26f51bb22c0b5270a66b4722f18aa7.asciidoc000066400000000000000000000007271506101734100264620ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-allocate.asciidoc:60 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "allocate": { "number_of_replicas": 2, "total_shards_per_node": 200 } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc3f5f40fa283559ca615cd0eb0a1755.asciidoc000066400000000000000000000006111506101734100265450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/doc-count-field.asciidoc:34 [source, python] ---- resp = client.indices.create( index="my_index", mappings={ "properties": { "my_histogram": { "type": "histogram" }, "my_text": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc49437ce2e7916facf58128308c2aa3.asciidoc000066400000000000000000000007071506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // searchable-snapshots/apis/mount-snapshot.asciidoc:134 [source, python] ---- resp = client.searchable_snapshots.mount( repository="my_repository", snapshot="my_snapshot", wait_for_completion=True, index="my_docs", renamed_index="docs", index_settings={ "index.number_of_replicas": 0 }, ignore_index_settings=[ "index.refresh_interval" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc51fbc60b0e20aac83300a43ad90252.asciidoc000066400000000000000000000015421506101734100265130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/shape.asciidoc:375 [source, python] ---- resp = client.index( index="example", document={ "location": { "type": "geometrycollection", "geometries": [ { "type": "point", "coordinates": [ 1000, 100 ] }, { "type": "linestring", "coordinates": [ [ 1001, 100 ], [ 1002, 100 ] ] } ] } }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/fc5a81f34d416e4b45ca8a859dd3b8f1.asciidoc000066400000000000000000000006401506101734100266450ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/autodatehistogram-aggregation.asciidoc:190 [source, python] ---- resp = client.search( index="my-index-000001", size="0", aggs={ "by_day": { "auto_date_histogram": { "field": "date", "buckets": 3, "time_zone": "-01:00" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc75ea748e5f49b8ab292e453ab641a6.asciidoc000066400000000000000000000011131506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // aggregations/bucket/nested-aggregation.asciidoc:62 [source, python] ---- resp = client.search( index="products", size="0", query={ "match": { "name": "led tv" } }, aggs={ "resellers": { "nested": { "path": "resellers" }, "aggs": { "min_price": { "min": { "field": "resellers.price" } } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc8a426f8a5112e61e2acb913982a8d9.asciidoc000066400000000000000000000003761506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // index-modules/index-sorting.asciidoc:137 [source, python] ---- resp = client.search( index="events", size=10, sort=[ { "timestamp": "desc" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fc9a1b1173690a911725cff3912e9755.asciidoc000066400000000000000000000005341506101734100262700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ilm/actions/ilm-readonly.asciidoc:22 [source, python] ---- resp = client.ilm.put_lifecycle( name="my_policy", policy={ "phases": { "warm": { "actions": { "readonly": {} } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fccbddfba9f975de7e321732874dfb78.asciidoc000066400000000000000000000003251506101734100270260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/data-stream-stats.asciidoc:182 [source, python] ---- resp = client.indices.data_streams_stats( name="my-data-stream*", human=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fce5c03a388c893cb11a6696e068543f.asciidoc000066400000000000000000000024061506101734100264340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/has-privileges-user-profile.asciidoc:104 [source, python] ---- resp = client.security.has_privileges_user_profile( uids=[ "u_LQPnxDxEjIH0GOUoFkZr5Y57YUwSkL9Joiq-g4OCbPc_0", "u_rzRnxDgEHIH0GOUoFkZr5Y27YUwSk19Joiq=g4OCxxB_1", "u_does-not-exist_0" ], privileges={ "cluster": [ "monitor", "create_snapshot", "manage_ml" ], "index": [ { "names": [ "suppliers", "products" ], "privileges": [ "create_doc" ] }, { "names": [ "inventory" ], "privileges": [ "read", "write" ] } ], "application": [ { "application": "inventory_manager", "privileges": [ "read", "data:write/inventory" ], "resources": [ "product/1852563" ] } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fce7a35a737fc9e54ac1225e310dd561.asciidoc000066400000000000000000000015501506101734100265520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:121 [source, python] ---- resp = client.search( index="my-index-000001", query={ "script_score": { "query": { "bool": { "filter": { "term": { "status": "published" } } } }, "script": { "source": "\n double value = dotProduct(params.query_vector, 'my_dense_vector');\n return sigmoid(1, Math.E, -value); \n ", 
"params": { "query_vector": [ 4, 3.4, -0.2 ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd04289c54493e19c1d3ac70d0b489c4.asciidoc000066400000000000000000000012011506101734100264140ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest.asciidoc:840 [source, python] ---- resp = client.ingest.put_pipeline( id="my-pipeline", processors=[ { "drop": { "description": "Drop documents that don't contain 'prod' tag", "if": "\n Collection tags = ctx.tags;\n if(tags != null){\n for (String tag : tags) {\n if (tag.toLowerCase().contains('prod')) {\n return false;\n }\n }\n }\n return true;\n " } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd0cd8ecd03468726b59a605eea06d75.asciidoc000066400000000000000000000016401506101734100265650ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // query-dsl/rank-feature-query.asciidoc:138 [source, python] ---- resp = client.search( index="test", query={ "bool": { "must": [ { "match": { "content": "2016" } } ], "should": [ { "rank_feature": { "field": "pagerank" } }, { "rank_feature": { "field": "url_length", "boost": 0.1 } }, { "rank_feature": { "field": "topics.sports", "boost": 0.4 } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd26bfdbe95b2d2db374385d12849f77.asciidoc000066400000000000000000000007261506101734100266040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/trim-tokenfilter.asciidoc:99 [source, python] ---- resp = client.indices.create( index="trim_example", settings={ "analysis": { "analyzer": { "keyword_trim": { "tokenizer": "keyword", "filter": [ "trim" ] } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd2d289e6b725fcc3cbe8fe7ffe02ea0.asciidoc000066400000000000000000000002461506101734100271520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/get-index-template-v1.asciidoc:103 [source, python] ---- resp = client.indices.get_template() print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd352b472d44d197022a46fce90b6ecb.asciidoc000066400000000000000000000012661506101734100265570ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/multi-get.asciidoc:184 [source, python] ---- resp = client.mget( docs=[ { "_index": "test", "_id": "1", "_source": False }, { "_index": "test", "_id": "2", "_source": [ "field3", "field4" ] }, { "_index": "test", "_id": "3", "_source": { "include": [ "user" ], "exclude": [ "user.location" ] } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd60b4092c6552164862cec287359676.asciidoc000066400000000000000000000003541506101734100261410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/anomaly-detection/apis/stop-datafeed.asciidoc:80 [source, python] ---- resp = client.ml.stop_datafeed( datafeed_id="datafeed-low_request_rate", timeout="30s", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd620f09dbce62c6f0f603a366623607.asciidoc000066400000000000000000000010441506101734100264120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // connector/apis/update-connector-filtering-api.asciidoc:156 [source, python] ---- resp = client.connector.update_filtering( connector_id="my-sql-connector", advanced_snippet={ "value": [ { "tables": [ "users", "orders" ], "query": "SELECT users.id AS id, orders.order_id AS order_id FROM users JOIN orders ON users.id = orders.user_id" } ] }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/fd6fdc8fa994dd02cf1177077325304f.asciidoc000066400000000000000000000006231506101734100265110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/troubleshooting/data/restore-from-snapshot.asciidoc:454 [source, python] ---- resp = client.snapshot.restore( repository="my_repository", snapshot="snapshot-20200617", feature_states=[ "geoip" ], indices="kibana_sample_data_flights,.ds-my-data-stream-2022.06.17-000001", include_aliases=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd738a9af7b5d21da31a7722f03aade8.asciidoc000066400000000000000000000003531506101734100267110ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/size-your-shards.asciidoc:171 [source, python] ---- resp = client.cat.shards( v=True, h="index,prirep,shard,store", s="prirep,store", bytes="gb", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd7eeadab6251d9113c4380a7fbe2572.asciidoc000066400000000000000000000010411506101734100266240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/remote-clusters-privileges-api-key.asciidoc:27 [source, python] ---- resp = client.security.put_role( name="remote-replication", cluster=[ "manage_ccr" ], remote_indices=[ { "clusters": [ "my_remote_cluster" ], "names": [ "leader-index" ], "privileges": [ "cross_cluster_replication" ] } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fd9b668eeb1f117950bd4991c7c03fb1.asciidoc000066400000000000000000000003631506101734100265710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // indices/analyze.asciidoc:163 [source, python] ---- resp = client.indices.analyze( analyzer="standard", text=[ "this is a test", "the second text" ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fdada036a875d7995d5d7aba9c06361e.asciidoc000066400000000000000000000005701506101734100266510ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/dense-vector.asciidoc:94 [source, python] ---- resp = client.indices.create( index="my-index-2", mappings={ "properties": { "my_vector": { "type": "dense_vector", "dims": 3, "index": False } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fdc8e090293e78e9a6b283650b682517.asciidoc000066400000000000000000000002741506101734100263040ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:161 [source, python] ---- resp = client.indices.open( index="my-data-stream", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fde3463ddf136fdfff1306a60986515e.asciidoc000066400000000000000000000003621506101734100265720ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // upgrade/archived-settings.asciidoc:64 [source, python] ---- resp = client.indices.get_settings( index="*", flat_settings=True, filter_path="**.settings.archived*", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fdf7cfdf1c92d21ee710675596eac6fd.asciidoc000066400000000000000000000021331506101734100270160ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // tab-widgets/semantic-search/hybrid-search.asciidoc:55 [source, python] ---- resp = client.search( index="my-index", retriever={ "rrf": { "retrievers": [ { "standard": { "query": { "match": { "my_text_field": "the query string" } } } }, { "knn": { "field": "text_embedding.predicted_value", "k": 10, "num_candidates": 100, 
"query_vector_builder": { "text_embedding": { "model_id": "sentence-transformers__msmarco-minilm-l-12-v3", "model_text": "the query string" } } } } ] } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe208d94ec93eabf3bd06139fa70701e.asciidoc000066400000000000000000000021561506101734100266410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rollup/migrating-to-downsampling.asciidoc:59 [source, python] ---- resp = client.indices.put_index_template( name="sensor-template", index_patterns=[ "sensor-*" ], data_stream={}, template={ "lifecycle": { "downsampling": [ { "after": "1d", "fixed_interval": "1h" } ] }, "settings": { "index.mode": "time_series" }, "mappings": { "properties": { "node": { "type": "keyword", "time_series_dimension": True }, "temperature": { "type": "half_float", "time_series_metric": "gauge" }, "voltage": { "type": "half_float", "time_series_metric": "gauge" }, "@timestamp": { "type": "date" } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe3a927d868cbc530e08e05964d5174a.asciidoc000066400000000000000000000011241506101734100264270ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/sort-search-results.asciidoc:117 [source, python] ---- resp = client.index( index="my-index-000001", id="1", refresh=True, document={ "product": "chocolate", "price": [ 20, 4 ] }, ) print(resp) resp1 = client.search( query={ "term": { "product": "chocolate" } }, sort=[ { "price": { "order": "asc", "mode": "avg" } } ], ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/fe54f3e53dbe7dee40ec3108a461d19a.asciidoc000066400000000000000000000011161506101734100267120ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // security/authentication/jwt-realm.asciidoc:522 [source, python] ---- resp = client.security.put_role_mapping( name="jwt_user1", refresh=True, roles=[ "jwt_role1" ], rules={ "all": [ { "field": { "realm.name": "jwt2" } }, { "field": { "username": "user2" } } ] }, enabled=True, metadata={ "version": 1 }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe6429d0d82174aa5acf95e96e237380.asciidoc000066400000000000000000000013221506101734100264310ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/range.asciidoc:324 [source, python] ---- resp = client.indices.create( index="idx", settings={ "index": { "mapping": { "source": { "mode": "synthetic" } } } }, mappings={ "properties": { "my_range": { "type": "ip_range" } } }, ) print(resp) resp1 = client.index( index="idx", id="1", document={ "my_range": [ "10.0.0.0/24", { "gte": "10.0.0.0", "lte": "10.0.0.255" } ] }, ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/fe6e35839f7d7381f8ec535c8f21959b.asciidoc000066400000000000000000000007141506101734100264660ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // how-to/recipes/scoring.asciidoc:124 [source, python] ---- resp = client.search( index="index", query={ "script_score": { "query": { "match": { "body": "elasticsearch" } }, "script": { "source": "_score * saturation(doc['pagerank'].value, 10)" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe7169bab8e626f582c9ea87585d0f35.asciidoc000066400000000000000000000006111506101734100265300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/types/histogram.asciidoc:98 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "my_histogram": { 
"type": "histogram" }, "my_text": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe806011466e7cdc1590da186297edb6.asciidoc000066400000000000000000000002631506101734100264260ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // api-conventions.asciidoc:119 [source, python] ---- resp = client.indices.create( index="", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe825c05e13e8163073166572c7ac97d.asciidoc000066400000000000000000000005251506101734100262710ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/geo-grid.asciidoc:199 [source, python] ---- resp = client.index( index="geocells", id="1", pipeline="geohex2shape", document={ "geocell": "811fbffffffffff" }, ) print(resp) resp1 = client.get( index="geocells", id="1", ) print(resp1) ---- python-elasticsearch-9.1.1/docs/examples/fe8c3e2632f5057bfbd1898a8fe4d0d2.asciidoc000066400000000000000000000024561506101734100266610ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-application-api.asciidoc:325 [source, python] ---- resp = client.search_application.put( name="my_search_application", search_application={ "indices": [ "index1", "index2" ], "template": { "script": { "lang": "mustache", "source": "\n {\n \"query\": {\n \"multi_match\": {\n \"query\": \"{{query_string}}\",\n \"fields\": [{{#text_fields}}\"{{name}}^{{boost}}\",{{/text_fields}}]\n }\n },\n \"explain\": \"{{explain}}\",\n \"from\": \"{{from}}\",\n \"size\": \"{{size}}\"\n }\n ", "params": { "query_string": "*", "text_fields": [ { "name": "title", "boost": 10 }, { "name": "description", "boost": 5 } ], "explain": False, "from": 0, "size": 10 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fe96ca3b2a559d8411aca7ed5f3854bd.asciidoc000066400000000000000000000003261506101734100267240ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/common-options.asciidoc:229 [source, python] ---- resp = client.indices.get_settings( index="my-index-000001", flat_settings=True, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/febb71d774e0a1fc67454213d7448c53.asciidoc000066400000000000000000000003421506101734100264200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // scripting/using.asciidoc:367 [source, python] ---- resp = client.update( index="my-index-000001", id="1", script="ctx._source.remove('new_field')", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fece7c0fe1f7d113aa05ff5346a18aff.asciidoc000066400000000000000000000016341506101734100270550ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // data-streams/use-a-data-stream.asciidoc:81 [source, python] ---- resp = client.bulk( index="my-data-stream", refresh=True, operations=[ { "create": {} }, { "@timestamp": "2099-03-08T11:04:05.000Z", "user": { "id": "vlb44hny" }, "message": "Login attempt failed" }, { "create": {} }, { "@timestamp": "2099-03-08T11:06:07.000Z", "user": { "id": "8a4f500d" }, "message": "Login successful" }, { "create": {} }, { "@timestamp": "2099-03-09T11:07:08.000Z", "user": { "id": "l7gk7f82" }, "message": "Logout successful" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/feda4b996ea7004f8b2c5f5007fb717b.asciidoc000066400000000000000000000007711506101734100266470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/range-enrich-policy-type-ex.asciidoc:91 [source, python] ---- resp = 
client.ingest.put_pipeline( id="networks_lookup", processors=[ { "enrich": { "description": "Add 'network' data based on 'ip'", "policy_name": "networks-policy", "field": "ip", "target_field": "network", "max_matches": "10" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fef520cbc9b0656e6aac7b3dd3da9984.asciidoc000066400000000000000000000004721506101734100270070ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // eql/eql.asciidoc:789 [source, python] ---- resp = client.eql.search( index="my-index*", query="\n sample by host\n [any where uptime > 0] by os\n [any where port > 100] by op_sys\n [any where bool == true] by os\n ", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff05842419968a2141bde0371ac2f6f4.asciidoc000066400000000000000000000006771506101734100263470ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/search-template.asciidoc:320 [source, python] ---- resp = client.render_search_template( source={ "query": { "match": { "user.group.emails": "{{#join}}emails{{/join}}" } } }, params={ "emails": [ "user1@example.com", "user_one@example.com" ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff09e13391cecb2e8b9dd440b37e065f.asciidoc000066400000000000000000000003251506101734100266420ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:316 [source, python] ---- resp = client.search( index="my-new-index-000001", size="0", filter_path="hits.total", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff1b96d2fdcf628bd938bff9e939943c.asciidoc000066400000000000000000000010131506101734100267560ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/runtime.asciidoc:965 [source, python] ---- resp = client.indices.create( index="my-index-000001", mappings={ "properties": { "timestamp": { "type": "date" }, "temperature": { "type": "long" }, "voltage": { "type": "double" }, "node": { "type": "keyword" } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff27e5cddd1f58d8a8f84f807fd27eec.asciidoc000066400000000000000000000013141506101734100271130ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ingest/processors/redact.asciidoc:179 [source, python] ---- resp = client.ingest.simulate( pipeline={ "processors": [ { "redact": { "field": "message", "patterns": [ "%{GITHUB_NAME:GITHUB_NAME}" ], "pattern_definitions": { "GITHUB_NAME": "@%{USERNAME}" } } } ] }, docs=[ { "_source": { "message": "@elastic-data-management the PR is ready for review" } } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff56ded50c65998c70f3c5691ddc6f86.asciidoc000066400000000000000000000003161506101734100266170ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // snapshot-restore/apis/delete-repo-api.asciidoc:33 [source, python] ---- resp = client.snapshot.delete_repository( name="my_repository", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff63ae39c34925dbfa54282ec9989124.asciidoc000066400000000000000000000010071506101734100264410ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // docs/reindex.asciidoc:1009 [source, python] ---- resp = client.reindex( source={ "remote": { "host": "http://otherhost:9200", "headers": { "Authorization": "ApiKey API_KEY_VALUE" } }, "index": "my-index-000001", "query": { "match": { "test": "data" } } }, dest={ "index": "my-new-index-000001" }, ) print(resp) ---- 
python-elasticsearch-9.1.1/docs/examples/ff776c0fccf93e1c7050f7cb7efbae0b.asciidoc000066400000000000000000000004701506101734100271430ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // ml/trained-models/apis/infer-trained-model.asciidoc:1012 [source, python] ---- resp = client.ml.infer_trained_model( model_id="model2", docs=[ { "text_field": "Hi my name is Josh and I live in Berlin" } ], ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff7b81fa96c3b994efa3dee230512291.asciidoc000066400000000000000000000006721506101734100265730ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // graph/explore.asciidoc:210 [source, python] ---- resp = client.graph.explore( index="clicklogs", query={ "match": { "query.raw": "midi" } }, vertices=[ { "field": "product" } ], connections={ "vertices": [ { "field": "query.raw" } ] }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ff945f5db7d8a9b0d9f6a2f2fcf849e3.asciidoc000066400000000000000000000011261506101734100270340ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // mapping/fields/tier-field.asciidoc:10 [source, python] ---- resp = client.index( index="index_1", id="1", document={ "text": "Document in index 1" }, ) print(resp) resp1 = client.index( index="index_2", id="2", refresh=True, document={ "text": "Document in index 2" }, ) print(resp1) resp2 = client.search( index="index_1,index_2", query={ "terms": { "_tier": [ "data_hot", "data_warm" ] } }, ) print(resp2) ---- python-elasticsearch-9.1.1/docs/examples/ffcf80e1094aa2d774f56f6b0bc54827.asciidoc000066400000000000000000000004711506101734100265700ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // analysis/tokenfilters/word-delimiter-graph-tokenfilter.asciidoc:47 [source, python] ---- resp = client.indices.analyze( tokenizer="keyword", filter=[ "word_delimiter_graph" ], text="Neil's-Super-Duper-XL500--42+AutoCoder", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ffd63dd186ab81b893faec3b3358fa09.asciidoc000066400000000000000000000003001506101734100267200ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // rest-api/security/delete-users.asciidoc:45 [source, python] ---- resp = client.security.delete_user( username="jacknich", ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/ffda10edaa7ce087703193c3cb95a426.asciidoc000066400000000000000000000070321506101734100266300ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // search/search-your-data/retrievers-examples.asciidoc:14 [source, python] ---- resp = client.indices.create( index="retrievers_example", settings={ "number_of_shards": 1 }, mappings={ "properties": { "vector": { "type": "dense_vector", "dims": 3, "similarity": "l2_norm", "index": True, "index_options": { "type": "flat" } }, "text": { "type": "text" }, "year": { "type": "integer" }, "topic": { "type": "keyword" }, "timestamp": { "type": "date" } } }, ) print(resp) resp1 = client.index( index="retrievers_example", id="1", document={ "vector": [ 0.23, 0.67, 0.89 ], "text": "Large language models are revolutionizing information retrieval by boosting search precision, deepening contextual understanding, and reshaping user experiences in data-rich environments.", "year": 2024, "topic": [ "llm", "ai", "information_retrieval" ], "timestamp": "2021-01-01T12:10:30" }, ) print(resp1) resp2 = client.index( index="retrievers_example", id="2", document={ "vector": [ 0.12, 0.56, 0.78 ], "text": "Artificial intelligence is 
transforming medicine, from advancing diagnostics and tailoring treatment plans to empowering predictive patient care for improved health outcomes.", "year": 2023, "topic": [ "ai", "medicine" ], "timestamp": "2022-01-01T12:10:30" }, ) print(resp2) resp3 = client.index( index="retrievers_example", id="3", document={ "vector": [ 0.45, 0.32, 0.91 ], "text": "AI is redefining security by enabling advanced threat detection, proactive risk analysis, and dynamic defenses against increasingly sophisticated cyber threats.", "year": 2024, "topic": [ "ai", "security" ], "timestamp": "2023-01-01T12:10:30" }, ) print(resp3) resp4 = client.index( index="retrievers_example", id="4", document={ "vector": [ 0.34, 0.21, 0.98 ], "text": "Elastic introduces Elastic AI Assistant, the open, generative AI sidekick powered by ESRE to democratize cybersecurity and enable users of every skill level.", "year": 2023, "topic": [ "ai", "elastic", "assistant" ], "timestamp": "2024-01-01T12:10:30" }, ) print(resp4) resp5 = client.index( index="retrievers_example", id="5", document={ "vector": [ 0.11, 0.65, 0.47 ], "text": "Learn how to spin up a deployment of our hosted Elasticsearch Service and use Elastic Observability to gain deeper insight into the behavior of your applications and systems.", "year": 2024, "topic": [ "documentation", "observability", "elastic" ], "timestamp": "2025-01-01T12:10:30" }, ) print(resp5) resp6 = client.indices.refresh( index="retrievers_example", ) print(resp6) ---- python-elasticsearch-9.1.1/docs/examples/ffe45a7c70071730c2078cabb8cbdf95.asciidoc000066400000000000000000000021501506101734100266330ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // vectors/vector-functions.asciidoc:294 [source, python] ---- resp = client.search( index="my-index-000001", query={ "script_score": { "query": { "bool": { "filter": { "term": { "status": "published" } } } }, "script": { "source": "\n float[] v = doc['my_dense_vector'].vectorValue;\n float vm = doc['my_dense_vector'].magnitude;\n float dotProduct = 0;\n for (int i = 0; i < v.length; i++) {\n dotProduct += v[i] * params.queryVector[i];\n }\n return dotProduct / (vm * (float) params.queryVectorMag);\n ", "params": { "queryVector": [ 4, 3.4, -0.2 ], "queryVectorMag": 5.25357 } } } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/examples/fff86117c47f974074284644e8a97a99.asciidoc000066400000000000000000000006251506101734100262520ustar00rootroot00000000000000// This file is autogenerated, DO NOT EDIT // inference/service-jinaai.asciidoc:155 [source, python] ---- resp = client.inference.put( task_type="text_embedding", inference_id="jinaai-embeddings", inference_config={ "service": "jinaai", "service_settings": { "model_id": "jina-embeddings-v3", "api_key": "" } }, ) print(resp) ---- python-elasticsearch-9.1.1/docs/guide/000077500000000000000000000000001506101734100176765ustar00rootroot00000000000000python-elasticsearch-9.1.1/docs/guide/index-custom-title-page.html000066400000000000000000000151111506101734100252330ustar00rootroot00000000000000

Documentation

The official Python client provides one-to-one mapping with Elasticsearch REST APIs.

Get to know the Python client

ℹ️ The elasticsearch-labs repo contains many interactive Python notebooks for testing out Elasticsearch using the Python client. These examples are mainly focused on vector search, hybrid search and generative AI use cases.

python-elasticsearch-9.1.1/docs/images/000077500000000000000000000000001506101734100200465ustar00rootroot00000000000000python-elasticsearch-9.1.1/docs/images/logo-elastic-glyph-color.svg000066400000000000000000000063621506101734100254150ustar00rootroot00000000000000 python-elasticsearch-9.1.1/docs/reference/000077500000000000000000000000001506101734100205375ustar00rootroot00000000000000python-elasticsearch-9.1.1/docs/reference/async.md000066400000000000000000000110731506101734100222000ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/async.html --- # Using with asyncio [async] The `elasticsearch` package supports async/await with [asyncio](https://docs.python.org/3/library/asyncio.html) and [aiohttp](https://docs.aiohttp.org). You can either install `aiohttp` directly or use the `[async]` extra: ```bash $ python -m pip install elasticsearch aiohttp # - OR - $ python -m pip install elasticsearch[async] ``` ## Getting Started with Async [_getting_started_with_async] After installation all async API endpoints are available via `~elasticsearch.AsyncElasticsearch` and are used in the same way as other APIs, with an extra `await`: ```python import asyncio from elasticsearch import AsyncElasticsearch client = AsyncElasticsearch() async def main(): resp = await client.search( index="documents", body={"query": {"match_all": {}}}, size=20, ) print(resp) asyncio.run(main()) ``` All APIs that are available under the sync client are also available under the async client. [Reference documentation](https://elasticsearch-py.readthedocs.io/en/latest/async.html#api-reference) ## ASGI Applications and Elastic APM [_asgi_applications_and_elastic_apm] [ASGI](https://asgi.readthedocs.io) (Asynchronous Server Gateway Interface) is a way to serve Python web applications making use of async I/O to achieve better performance. Some examples of ASGI frameworks include FastAPI, Django 3.0+, and Starlette. If you’re using one of these frameworks along with Elasticsearch then you should be using `~elasticsearch.AsyncElasticsearch` to avoid blocking the event loop with synchronous network calls for optimal performance. [Elastic APM](apm-agent-python://reference/index.md) also supports tracing of async Elasticsearch queries just the same as synchronous queries. For an example on how to configure `AsyncElasticsearch` with a popular ASGI framework [FastAPI](https://fastapi.tiangolo.com/) and APM tracing there is a [pre-built example](https://github.com/elastic/elasticsearch-py/tree/master/examples/fastapi-apm) in the `examples/fastapi-apm` directory. See also the [Using OpenTelemetry](/reference/opentelemetry.md) page. ## Frequently Asked Questions [_frequently_asked_questions] ### ValueError when initializing `AsyncElasticsearch`? [_valueerror_when_initializing_asyncelasticsearch] If when trying to use `AsyncElasticsearch` you receive `ValueError: You must have 'aiohttp' installed to use AiohttpHttpNode` you should ensure that you have `aiohttp` installed in your environment (check with `$ python -m pip freeze | grep aiohttp`). Otherwise, async support won’t be available. ### What about the `elasticsearch-async` package? [_what_about_the_elasticsearch_async_package] Previously asyncio was supported separately via the [elasticsearch-async](https://github.com/elastic/elasticsearch-py-async) package. The `elasticsearch-async` package has been deprecated in favor of `AsyncElasticsearch` provided by the `elasticsearch` package in v7.8 and onwards. 
### Receiving *Unclosed client session / connector* warning? [_receiving_unclosed_client_session_connector_warning] This warning is created by `aiohttp` when an open HTTP connection is garbage collected. You’ll typically run into this when closing your application. To resolve the issue ensure that `~elasticsearch.AsyncElasticsearch.close` is called before the `~elasticsearch.AsyncElasticsearch` instance is garbage collected. For example if using FastAPI that might look like this: ```python import os from contextlib import asynccontextmanager from fastapi import FastAPI from elasticsearch import AsyncElasticsearch ELASTICSEARCH_URL = os.environ["ELASTICSEARCH_URL"] client = None @asynccontextmanager async def lifespan(app: FastAPI): global client client = AsyncElasticsearch(ELASTICSEARCH_URL) yield await client.close() app = FastAPI(lifespan=lifespan) @app.get("/") async def main(): return await client.info() ``` You can run this example by saving it to `main.py` and executing `ELASTICSEARCH_URL=http://localhost:9200 uvicorn main:app`. ## Async Helpers [_async_helpers] Async variants of all helpers are available in `elasticsearch.helpers` and are all prefixed with `async_*`. You’ll notice that these APIs are identical to the ones in the sync [*Client helpers*](/reference/client-helpers.md) documentation. All async helpers that accept an iterator or generator also accept async iterators and async generators. [Reference documentation](https://elasticsearch-py.readthedocs.io/en/latest/async.html#api-reference) python-elasticsearch-9.1.1/docs/reference/client-helpers.md000066400000000000000000000041671506101734100240070ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/client-helpers.html --- # Client helpers [client-helpers] You can find here a collection of simple helper functions that abstract some specifics of the raw API. ## Bulk helpers [bulk-helpers] There are several helpers for the bulk API since its requirement for specific formatting and other considerations can make it cumbersome if used directly. All bulk helpers accept an instance of `Elasticsearch` class and an iterable `action` (any iterable, can also be a generator, which is ideal in most cases since it allows you to index large datasets without the need of loading them into memory). The items in the iterable `action` should be the documents we wish to index in several formats. The most common one is the same as returned by `search()`, for example: ```yaml { '_index': 'index-name', '_id': 42, '_routing': 5, 'pipeline': 'my-ingest-pipeline', '_source': { "title": "Hello World!", "body": "..." } } ``` Alternatively, if `_source` is not present, it pops all metadata fields from the doc and use the rest as the document data: ```yaml { "_id": 42, "_routing": 5, "title": "Hello World!", "body": "..." } ``` The `bulk()` api accepts `index`, `create`, `delete`, and `update` actions. Use the `_op_type` field to specify an action (`_op_type` defaults to `index`): ```yaml { '_op_type': 'delete', '_index': 'index-name', '_id': 42, } { '_op_type': 'update', '_index': 'index-name', '_id': 42, 'doc': {'question': 'The life, universe and everything.'} } ``` ## Scan [scan] Simple abstraction on top of the `scroll()` API - a simple iterator that yields all hits as returned by underlining scroll requests. By default scan does not return results in any pre-determined order. 
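A basic loop over every matching document looks like this (a minimal sketch; `es` is assumed to be a connected `Elasticsearch` client):

```python
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan

es = Elasticsearch("http://localhost:9200")

# scan() lazily drives the scroll API and yields one hit dict at a time,
# so the full result set never has to fit in memory
for hit in scan(es, query={"query": {"match": {"title": "python"}}}, index="orders-*"):
    print(hit["_source"])
```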
To have a standard order in the returned documents (either by score or explicit sort definition) when scrolling, use `preserve_order=True`. This may be an expensive operation and will negate the performance benefits of using `scan`. ```py scan(es, query={"query": {"match": {"title": "python"}}}, index="orders-*" ) ``` python-elasticsearch-9.1.1/docs/reference/configuration.md000066400000000000000000000336471506101734100237450ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/config.html --- # Configuration [config] This page contains information about the most important configuration options of the Python {{es}} client. ## TLS/SSL [tls-and-ssl] The options in this section can only be used when the node is configured for HTTPS. An error will be raised if using these options with an HTTP node. ### Verifying server certificates [_verifying_server_certificates] The typical route to verify a cluster certificate is via a "CA bundle" which can be specified via the `ca_certs` parameter. If no options are given and the [certifi package](https://github.com/certifi/python-certifi) is installed then certifi’s CA bundle is used by default. If you have your own CA bundle to use you can configure via the `ca_certs` parameter: ```python client = Elasticsearch( "https://...", ca_certs="/path/to/certs.pem" ) ``` If using a generated certificate or certificate with a known fingerprint you can use the `ssl_assert_fingerprint` to specify the fingerprint which tries to match the server’s leaf certificate during the TLS handshake. If there is any matching certificate the connection is verified, otherwise a `TlsError` is raised. In Python 3.9 and earlier only the leaf certificate will be verified but in Python 3.10+ private APIs are used to verify any certificate in the certificate chain. This helps when using certificates that are generated on a multi-node cluster. ```python client = Elasticsearch( "https://...", ssl_assert_fingerprint=( "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3" ) ) ``` To disable certificate verification use the `verify_certs=False` parameter. This option should be avoided in production, instead use the other options to verify the clusters' certificate. ```python client = Elasticsearch( "https://...", verify_certs=False ) ``` ### TLS versions [_tls_versions] Configuring the minimum TLS version to connect to is done via the `ssl_version` parameter. By default this is set to a minimum value of TLSv1.2. Use the `ssl.TLSVersion` enumeration to specify versions. ```python import ssl client = Elasticsearch( ..., ssl_version=ssl.TLSVersion.TLSv1_2 ) ``` ### Client TLS certificate authentication [_client_tls_certificate_authentication] Elasticsearch can be configured to authenticate clients via TLS client certificates. Client certificate and keys can be configured via the `client_cert` and `client_key` parameters: ```python client = Elasticsearch( ..., client_cert="/path/to/cert.pem", client_key="/path/to/key.pem", ) ``` ### Using an SSLContext [_using_an_sslcontext] For advanced users an `ssl.SSLContext` object can be used for configuring TLS via the `ssl_context` parameter. The `ssl_context` parameter can’t be combined with any other TLS options except for the `ssl_assert_fingerprint` parameter. ```python import ssl # Create and configure an SSLContext ctx = ssl.create_default_context() ctx.load_verify_locations(...) 
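# For example, to trust a specific CA bundle (the path shown is illustrative):
# ctx.load_verify_locations(cafile="/path/to/certs.pem")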
client = Elasticsearch( ..., ssl_context=ctx ) ``` ## HTTP compression [compression] Compression of HTTP request and response bodies can be enabled with the `http_compress` parameter. If enabled then HTTP request bodies will be compressed with `gzip` and HTTP responses will include the `Accept-Encoding: gzip` HTTP header. By default compression is disabled. ```python client = Elasticsearch( ..., http_compress=True # Enable compression! ) ``` HTTP compression is recommended to be enabled when requests are traversing the network. Compression is automatically enabled when connecting to Elastic Cloud. ## Request timeouts [timeouts] Requests can be configured to timeout if taking too long to be serviced. The `request_timeout` parameter can be passed via the client constructor or the client `.options()` method. When the request times out the node will raise a `ConnectionTimeout` exception which can trigger retries. Setting `request_timeout` to `None` will disable timeouts. ```python client = Elasticsearch( ..., request_timeout=10 # 10 second timeout ) # Search request will timeout in 5 seconds client.options(request_timeout=5).search(...) ``` ### API and server timeouts [_api_and_server_timeouts] There are API-level timeouts to take into consideration when making requests which can cause the request to timeout on server-side rather than client-side. You may need to configure both a transport and API level timeout for long running operations. In the example below there are three different configurable timeouts for the `cluster.health` API all with different meanings for the request: ```python client.options( # Amount of time to wait for an HTTP response to start. request_timeout=30 ).cluster.health( # Amount of time to wait to collect info on all nodes. timeout=30, # Amount of time to wait for info from the master node. master_timeout=10, ) ``` ## Retries [retries] Requests can be retried if they don’t return with a successful response. This provides a way for requests to be resilient against transient failures or overloaded nodes. The maximum number of retries per request can be configured via the `max_retries` parameter. Setting this parameter to 0 disables retries. This parameter can be set in the client constructor or per-request via the client `.options()` method: ```python client = Elasticsearch( ..., max_retries=5 ) # For this API request we disable retries with 'max_retries=0' client.options(max_retries=0).index( index="blogs", document={ "title": "..." } ) ``` ### Retrying on connection errors and timeouts [_retrying_on_connection_errors_and_timeouts] Connection errors are automatically retried if retries are enabled. Retrying requests on connection timeouts can be enabled or disabled via the `retry_on_timeout` parameter. This parameter can be set on the client constructor or via the client `.options()` method: ```python client = Elasticsearch( ..., retry_on_timeout=True ) client.options(retry_on_timeout=False).info() ``` ### Retrying status codes [_retrying_status_codes] By default if retries are enabled `retry_on_status` is set to `(429, 502, 503, 504)`. This parameter can be set on the client constructor or via the client `.options()` method. Setting this value to `()` will disable the default behavior. ```python client = Elasticsearch( ..., retry_on_status=() ) # Retry this API on '500 Internal Error' statuses client.options(retry_on_status=[500]).index( index="blogs", document={ "title": "..." 
} ) ``` ### Ignoring status codes [_ignoring_status_codes] By default an `ApiError` exception will be raised for any non-2XX HTTP requests that exhaust retries, if any. If you’re expecting an HTTP error from the API but aren’t interested in raising an exception you can use the `ignore_status` parameter via the client `.options()` method. A good example where this is useful is setting up or cleaning up resources in a cluster in a robust way: ```python client = Elasticsearch(...) # API request is robust against the index not existing: resp = client.options(ignore_status=404).indices.delete(index="delete-this") resp.meta.status # Can be either '2XX' or '404' # API request is robust against the index already existing: resp = client.options(ignore_status=[400]).indices.create( index="create-this", mapping={ "properties": {"field": {"type": "integer"}} } ) resp.meta.status # Can be either '2XX' or '400' ``` When using the `ignore_status` parameter the error response will be returned serialized just like a non-error response. In these cases it can be useful to inspect the HTTP status of the response. To do this you can inspect the `resp.meta.status`. ## Sniffing for new nodes [sniffing] Additional nodes can be discovered by a process called "sniffing" where the client will query the cluster for more nodes that can handle requests. Sniffing can happen at three different times: on client instantiation, before requests, and on a node failure. These three behaviors can be enabled and disabled with the `sniff_on_start`, `sniff_before_requests`, and `sniff_on_node_failure` parameters. ::::{important} When using an HTTP load balancer or proxy you cannot use sniffing functionality as the cluster would supply the client with IP addresses to directly connect to the cluster, circumventing the load balancer. Depending on your configuration this might be something you don’t want or break completely. :::: ### Waiting between sniffing attempts [_waiting_between_sniffing_attempts] To avoid needlessly sniffing too often there is a delay between attempts to discover new nodes. This value can be controlled via the `min_delay_between_sniffing` parameter. ### Filtering nodes which are sniffed [_filtering_nodes_which_are_sniffed] By default nodes which are marked with only a `master` role will not be used. To change the behavior the parameter `sniffed_node_callback` can be used. To mark a sniffed node not to be added to the node pool return `None` from the `sniffed_node_callback`, otherwise return a `NodeConfig` instance. 
```python from typing import Optional, Dict, Any from elastic_transport import NodeConfig from elasticsearch import Elasticsearch def filter_master_eligible_nodes( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: # This callback ignores all nodes that are master eligible # instead of master-only nodes (default behavior) if "master" in node_info.get("roles", ()): return None return node_config client = Elasticsearch( "https://localhost:9200", sniffed_node_callback=filter_master_eligible_nodes ) ``` The `node_info` parameter is part of the response from the `nodes.info()` API, below is an example of what that object looks like: ```json { "name": "SRZpKFZ", "transport_address": "127.0.0.1:9300", "host": "127.0.0.1", "ip": "127.0.0.1", "version": "5.0.0", "build_hash": "253032b", "roles": ["master", "data", "ingest"], "http": { "bound_address": ["[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200"], "publish_address": "1.1.1.1:123", "max_content_length_in_bytes": 104857600 } } ``` ## Node Pool [node-pool] ### Selecting a node from the pool [_selecting_a_node_from_the_pool] You can specify a node selector pattern via the `node_selector_class` parameter. The supported values are `round_robin` and `random`. Default is `round_robin`. ```python client = Elasticsearch( ..., node_selector_class="round_robin" ) ``` Custom selectors are also supported: ```python from elastic_transport import NodeSelector class CustomSelector(NodeSelector): def select(nodes): ... client = Elasticsearch( ..., node_selector_class=CustomSelector ) ``` ### Marking nodes dead and alive [_marking_nodes_dead_and_alive] Individual nodes of Elasticsearch may have transient connectivity or load issues which may make them unable to service requests. To combat this the pool of nodes will detect when a node isn’t able to service requests due to transport or API errors. After a node has been timed out it will be moved back to the set of "alive" nodes but only after the node returns a successful response will the node be marked as "alive" in terms of consecutive errors. The `dead_node_backoff_factor` and `max_dead_node_backoff` parameters can be used to configure how long the node pool will put the node into timeout with each consecutive failure. Both parameters use a unit of seconds. The calculation is equal to `min(dead_node_backoff_factor * (2 ** (consecutive_failures - 1)), max_dead_node_backoff)`. ## Serializers [serializer] Serializers transform bytes on the wire into native Python objects and vice-versa. By default the client ships with serializers for `application/json`, `application/x-ndjson`, `text/*`, `application/vnd.apache.arrow.stream` and `application/mapbox-vector-tile`. You can define custom serializers via the `serializers` parameter: ```python from elasticsearch import Elasticsearch, JsonSerializer class JsonSetSerializer(JsonSerializer): """Custom JSON serializer that handles Python sets""" def default(self, data: Any) -> Any: if isinstance(data, set): return list(data) return super().default(data) client = Elasticsearch( ..., # Serializers are a mapping of 'mimetype' to Serializer class. serializers={"application/json": JsonSetSerializer()} ) ``` If the `orjson` package is installed, you can use the faster ``OrjsonSerializer`` for the default mimetype (``application/json``): ```python from elasticsearch import Elasticsearch, OrjsonSerializer es = Elasticsearch( ..., serializer=OrjsonSerializer() ) ``` orjson is particularly fast when serializing vectors as it has native numpy support. 
This will be the default in a future release. Note that you can install orjson with the `orjson` extra: ```sh $ python -m pip install elasticsearch[orjson] ``` ## Nodes [nodes] ### Node implementations [_node_implementations] The default node class for synchronous I/O is `urllib3` and the default node class for asynchronous I/O is `aiohttp`. For all of the built-in HTTP node implementations like `urllib3`, `requests`, and `aiohttp` you can specify with a simple string to the `node_class` parameter: ```python from elasticsearch import Elasticsearch client = Elasticsearch( ..., node_class="requests" ) ``` You can also specify a custom node implementation via the `node_class` parameter: ```python from elasticsearch import Elasticsearch from elastic_transport import Urllib3HttpNode class CustomHttpNode(Urllib3HttpNode): ... client = Elasticsearch( ... node_class=CustomHttpNode ) ``` ### HTTP connections per node [_http_connections_per_node] Each node contains its own pool of HTTP connections to allow for concurrent requests. This value is configurable via the `connections_per_node` parameter: ```python client = Elasticsearch( ..., connections_per_node=5 ) ``` python-elasticsearch-9.1.1/docs/reference/connecting.md000066400000000000000000000326261506101734100232210ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/connecting.html --- # Connecting [connecting] This page contains the information you need to connect the Client with {{es}}. ## Connecting to Elastic Cloud [connect-ec] [Elastic Cloud](docs-content://deploy-manage/deploy/elastic-cloud/cloud-hosted.md) is the easiest way to get started with {{es}}. When connecting to Elastic Cloud with the Python {{es}} client you should always use the `cloud_id` parameter to connect. You can find this value within the "Manage Deployment" page after you’ve created a cluster (look in the top-left if you’re in Kibana). We recommend using a Cloud ID whenever possible because your client will be automatically configured for optimal use with Elastic Cloud including HTTPS and HTTP compression. ```python from elasticsearch import Elasticsearch # Password for the 'elastic' user generated by Elasticsearch ELASTIC_PASSWORD = "" # Found in the 'Manage Deployment' page CLOUD_ID = "deployment-name:dXMtZWFzdDQuZ2Nw..." # Create the client instance client = Elasticsearch( cloud_id=CLOUD_ID, basic_auth=("elastic", ELASTIC_PASSWORD) ) # Successful response! client.info() # {'name': 'instance-0000000000', 'cluster_name': ...} ``` ## Connecting to a self-managed cluster [connect-self-managed-new] By default {{es}} will start with security features like authentication and TLS enabled. To connect to the {{es}} cluster you’ll need to configure the Python {{es}} client to use HTTPS with the generated CA certificate in order to make requests successfully. If you’re just getting started with {{es}} we recommend reading the documentation on [configuring](docs-content://deploy-manage/deploy/self-managed/configure-elasticsearch.md) and [starting {{es}}](docs-content://deploy-manage/maintenance/start-stop-services/start-stop-elasticsearch.md) to ensure your cluster is running as expected. When you start {{es}} for the first time you’ll see a distinct block like the one below in the output from {{es}} (you may have to scroll up if it’s been a while): ```sh ---------------------------------------------------------------- -> Elasticsearch security features have been automatically configured! 
-> Authentication is enabled and cluster connections are encrypted. -> Password for the elastic user (reset with `bin/elasticsearch-reset-password -u elastic`): lhQpLELkjkrawaBoaz0Q -> HTTP CA certificate SHA-256 fingerprint: a52dd93511e8c6045e21f16654b77c9ee0f34aea26d9f40320b531c474676228 ... ---------------------------------------------------------------- ``` Note down the `elastic` user password and HTTP CA fingerprint for the next sections. In the examples below they will be stored in the variables `ELASTIC_PASSWORD` and `CERT_FINGERPRINT` respectively. Depending on the circumstances there are two options for verifying the HTTPS connection, either verifying with the CA certificate itself or via the HTTP CA certificate fingerprint. ### Verifying HTTPS with CA certificates [_verifying_https_with_ca_certificates] Using the `ca_certs` option is the default way the Python {{es}} client verifies an HTTPS connection. The generated root CA certificate can be found in the `certs` directory in your {{es}} config location (`$ES_CONF_PATH/certs/http_ca.crt`). If you’re running {{es}} in Docker there is [additional documentation for retrieving the CA certificate](docs-content://deploy-manage/deploy/self-managed/install-elasticsearch-with-docker.md). Once you have the `http_ca.crt` file somewhere accessible pass the path to the client via `ca_certs`: ```python from elasticsearch import Elasticsearch # Password for the 'elastic' user generated by Elasticsearch ELASTIC_PASSWORD = "" # Create the client instance client = Elasticsearch( "https://localhost:9200", ca_certs="/path/to/http_ca.crt", basic_auth=("elastic", ELASTIC_PASSWORD) ) # Successful response! client.info() # {'name': 'instance-0000000000', 'cluster_name': ...} ``` ::::{note} If you don’t specify `ca_certs` or `ssl_assert_fingerprint` then the [certifi package](https://certifiio.readthedocs.io) will be used for `ca_certs` by default if available. :::: ### Verifying HTTPS with certificate fingerprints (Python 3.10 or later) [_verifying_https_with_certificate_fingerprints_python_3_10_or_later] ::::{note} Using this method **requires using Python 3.10 or later** and isn’t available when using the `aiohttp` HTTP client library so can’t be used with `AsyncElasticsearch`. :::: This method of verifying the HTTPS connection takes advantage of the certificate fingerprint value noted down earlier. Take this SHA256 fingerprint value and pass it to the Python {{es}} client via `ssl_assert_fingerprint`: ```python from elasticsearch import Elasticsearch # Fingerprint either from Elasticsearch startup or above script. # Colons and uppercase/lowercase don't matter when using # the 'ssl_assert_fingerprint' parameter CERT_FINGERPRINT = "A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28" # Password for the 'elastic' user generated by Elasticsearch ELASTIC_PASSWORD = "" client = Elasticsearch( "https://localhost:9200", ssl_assert_fingerprint=CERT_FINGERPRINT, basic_auth=("elastic", ELASTIC_PASSWORD) ) # Successful response! 
client.info() # {'name': 'instance-0000000000', 'cluster_name': ...} ``` The certificate fingerprint can be calculated using `openssl x509` with the certificate file: ```sh openssl x509 -fingerprint -sha256 -noout -in /path/to/http_ca.crt ``` If you don’t have access to the generated CA file from {{es}} you can use the following script to output the root CA fingerprint of the {{es}} instance with `openssl s_client`: ```sh # Replace the values of 'localhost' and '9200' to the # corresponding host and port values for the cluster. openssl s_client -connect localhost:9200 -servername localhost -showcerts /dev/null \ | openssl x509 -fingerprint -sha256 -noout -in /dev/stdin ``` The output of `openssl x509` will look something like this: ```sh SHA256 Fingerprint=A5:2D:D9:35:11:E8:C6:04:5E:21:F1:66:54:B7:7C:9E:E0:F3:4A:EA:26:D9:F4:03:20:B5:31:C4:74:67:62:28 ``` ## Connecting without security enabled [connect-no-security] ::::{warning} Running {{es}} without security enabled is not recommended. :::: If your cluster is configured with [security explicitly disabled](elasticsearch://reference/elasticsearch/configuration-reference/security-settings.md) then you can connect via HTTP: ```python from elasticsearch import Elasticsearch # Create the client instance client = Elasticsearch("http://localhost:9200") # Successful response! client.info() # {'name': 'instance-0000000000', 'cluster_name': ...} ``` ## Connecting to multiple nodes [connect-url] The Python {{es}} client supports sending API requests to multiple nodes in the cluster. This means that work will be more evenly spread across the cluster instead of hammering the same node over and over with requests. To configure the client with multiple nodes you can pass a list of URLs, each URL will be used as a separate node in the pool. ```python from elasticsearch import Elasticsearch # List of nodes to connect use with different hosts and ports. NODES = [ "https://localhost:9200", "https://localhost:9201", "https://localhost:9202", ] # Password for the 'elastic' user generated by Elasticsearch ELASTIC_PASSWORD = "" client = Elasticsearch( NODES, ca_certs="/path/to/http_ca.crt", basic_auth=("elastic", ELASTIC_PASSWORD) ) ``` By default nodes are selected using round-robin, but alternate node selection strategies can be configured with `node_selector_class` parameter. ::::{note} If your {{es}} cluster is behind a load balancer like when using Elastic Cloud you won’t need to configure multiple nodes. Instead use the load balancer host and port. :::: ## Authentication [authentication] This section contains code snippets to show you how to connect to various {{es}} providers. 
All authentication methods are supported on the client constructor or via the per-request `.options()` method: ```python from elasticsearch import Elasticsearch # Authenticate from the constructor client = Elasticsearch( "https://localhost:9200", ca_certs="/path/to/http_ca.crt", basic_auth=("username", "password") ) # Authenticate via the .options() method: client.options( basic_auth=("username", "password") ).indices.get(index="*") # You can persist the authenticated client to use # later or use for multiple API calls: auth_client = client.options(api_key="api_key") for i in range(10): auth_client.index( index="example-index", document={"field": i} ) ``` ### HTTP Basic authentication (Username and Password) [auth-basic] HTTP Basic authentication uses the `basic_auth` parameter by passing in a username and password within a tuple: ```python from elasticsearch import Elasticsearch # Adds the HTTP header 'Authorization: Basic ' client = Elasticsearch( "https://localhost:9200", ca_certs="/path/to/http_ca.crt", basic_auth=("username", "password") ) ``` ### HTTP Bearer authentication [auth-bearer] HTTP Bearer authentication uses the `bearer_auth` parameter by passing the token as a string. This authentication method is used by [Service Account Tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-service-token) and [Bearer Tokens](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-get-token). ```python from elasticsearch import Elasticsearch # Adds the HTTP header 'Authorization: Bearer token-value' client = Elasticsearch( "https://localhost:9200", bearer_auth="token-value" ) ``` ### API Key authentication [auth-apikey] You can configure the client to use {{es}}'s API Key for connecting to your cluster. These can be generated through the [Elasticsearch Create API key API](https://www.elastic.co/docs/api/doc/elasticsearch/operation/operation-security-create-api-key) or [Kibana Stack Management](docs-content://deploy-manage/api-keys/elasticsearch-api-keys.md#create-api-key). ```python from elasticsearch import Elasticsearch # Adds the HTTP header 'Authorization: ApiKey ' client = Elasticsearch( "https://localhost:9200", ca_certs="/path/to/http_ca.crt", api_key="api_key", ) ``` ## Enabling the Compatibility Mode [compatibility-mode] The {{es}} server version 8.0 is introducing a new compatibility mode that allows you a smoother upgrade experience from 7 to 8. In a nutshell, you can use the latest 7.x Python {{es}} {{es}} client with an 8.x {{es}} server, giving more room to coordinate the upgrade of your codebase to the next major version. If you want to leverage this functionality, please make sure that you are using the latest 7.x Python {{es}} client and set the environment variable `ELASTIC_CLIENT_APIVERSIONING` to `true`. The client is handling the rest internally. For every 8.0 and beyond Python {{es}} client, you’re all set! The compatibility mode is enabled by default. ## Using the Client in a Function-as-a-Service Environment [connecting-faas] This section illustrates the best practices for leveraging the {{es}} client in a Function-as-a-Service (FaaS) environment. The most influential optimization is to initialize the client outside of the function, the global scope. This practice does not only improve performance but also enables background functionality as – for example – [sniffing](https://www.elastic.co/blog/elasticsearch-sniffing-best-practices-what-when-why-how). 
The following examples provide a skeleton for the best practices. ::::{important} The async client shouldn’t be used within Function-as-a-Service as a new event loop must be started for each invocation. Instead the synchronous `Elasticsearch` client is recommended. :::: ### GCP Cloud Functions [connecting-faas-gcp] ```python from elasticsearch import Elasticsearch # Client initialization client = Elasticsearch( cloud_id="deployment-name:ABCD...", api_key=... ) def main(request): # Use the client client.search(index=..., query={"match_all": {}}) ``` ### AWS Lambda [connecting-faas-aws] ```python from elasticsearch import Elasticsearch # Client initialization client = Elasticsearch( cloud_id="deployment-name:ABCD...", api_key=... ) def main(event, context): # Use the client client.search(index=..., query={"match_all": {}}) ``` ### Azure Functions [connecting-faas-azure] ```python import azure.functions as func from elasticsearch import Elasticsearch # Client initialization client = Elasticsearch( cloud_id="deployment-name:ABCD...", api_key=... ) def main(request: func.HttpRequest) -> func.HttpResponse: # Use the client client.search(index=..., query={"match_all": {}}) ``` Resources used to assess these recommendations: * [GCP Cloud Functions: Tips & Tricks](https://cloud.google.com/functions/docs/bestpractices/tips#use_global_variables_to_reuse_objects_in_future_invocations) * [Best practices for working with AWS Lambda functions](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html) * [Azure Functions Python developer guide](https://docs.microsoft.com/en-us/azure/azure-functions/functions-reference-python?tabs=azurecli-linux%2Capplication-level#global-variables) * [AWS Lambda: Comparing the effect of global scope](https://docs.aws.amazon.com/lambda/latest/operatorguide/global-scope.html) python-elasticsearch-9.1.1/docs/reference/dsl_configuration.md000066400000000000000000000074311506101734100245770ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_configuration.html --- # Configuration [_configuration] There are several ways to configure connections for the library. The easiest and most useful approach is to define one default connection that can be used every time an API call is made without explicitly passing in other connections. ::::{note} Unless you want to access multiple clusters from your application, it is highly recommended that you use the `create_connection` method and all operations will use that connection automatically. :::: ## Default connection [_default_connection] To define a default connection that can be used globally, use the `connections` module and the `create_connection` method like this: ```python from elasticsearch.dsl import connections connections.create_connection(hosts=['localhost'], timeout=20) ``` ### Single connection with an alias [_single_connection_with_an_alias] You can define the `alias` or name of a connection so you can easily refer to it later. The default value for `alias` is `default`. ```python from elasticsearch.dsl import connections connections.create_connection(alias='my_new_connection', hosts=['localhost'], timeout=60) ``` Additional keyword arguments (`hosts` and `timeout` in our example) will be passed to the `Elasticsearch` class from `elasticsearch-py`. To see all possible configuration options refer to the [documentation](https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html). 
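Once registered, the connection can be retrieved by its alias elsewhere in the application (a minimal sketch; `get_connection` returns the underlying `Elasticsearch` client):

```python
from elasticsearch.dsl import connections

# look up the client registered under the alias defined above
es = connections.get_connection('my_new_connection')
print(es.info())
```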
## Multiple clusters [_multiple_clusters] You can define multiple connections to multiple clusters at the same time using the `configure` method: ```python from elasticsearch.dsl import connections connections.configure( default={'hosts': 'localhost'}, dev={ 'hosts': ['esdev1.example.com:9200'], 'sniff_on_start': True } ) ``` Such connections will be constructed lazily when requested for the first time. You can alternatively define multiple connections by adding them one by one as shown in the following example: ```python # if you have configuration options to be passed to Elasticsearch.__init__ # this also shows creating a connection with the alias 'qa' connections.create_connection('qa', hosts=['esqa1.example.com'], sniff_on_start=True) # if you already have an Elasticsearch instance ready connections.add_connection('another_qa', my_client) ``` ### Using aliases [_using_aliases] When using multiple connections, you can refer to them using the string alias specified when you created the connection. This example shows how to use an alias to a connection: ```python s = Search(using='qa') ``` A `KeyError` will be raised if there is no connection registered with that alias. ## Manual [_manual] If you don’t want to supply a global configuration, you can always pass in your own connection as an instance of `elasticsearch.Elasticsearch` with the parameter `using` wherever it is accepted like this: ```python s = Search(using=Elasticsearch('localhost')) ``` You can even use this approach to override any connection the object might be already associated with: ```python s = s.using(Elasticsearch('otherhost:9200')) ``` ::::{note} When using the `dsl` module, it is highly recommended that you use the built-in serializer (`elasticsearch.dsl.serializer.serializer`) to ensure your objects are correctly serialized into `JSON` every time. The `create_connection` method that is described here (and that the `configure` method uses under the hood) will do that automatically for you, unless you explicitly specify your own serializer. The built-in serializer also allows you to serialize your own objects - just define a `to_dict()` method on your objects and that method will be automatically called when serializing your custom objects to `JSON`. :::: python-elasticsearch-9.1.1/docs/reference/dsl_examples.md000066400000000000000000000004541506101734100235440ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_examples.html --- # Examples [_examples] Please see the [DSL examples](https://github.com/elastic/elasticsearch-py/tree/master/examples/dsl) directory to see some complex examples using the DSL module. python-elasticsearch-9.1.1/docs/reference/dsl_how_to_guides.md000066400000000000000000001630541506101734100245730ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_how_to_guides.html --- # How-To Guides [_how_to_guides] ## Search DSL [search_dsl] ### The `Search` object [_the_search_object] The `Search` object represents the entire search request: * queries * filters * aggregations * k-nearest neighbor searches * sort * pagination * highlighting * suggestions * collapsing * additional parameters * associated client The API is designed to be chainable. With the exception of the aggregations functionality this means that the `Search` object is immutable -all changes to the object will result in a shallow copy being created which contains the changes. 
You can safely pass the `Search` object to foreign code without fear of it modifying your objects as long as it sticks to the `Search` object APIs. You can pass an instance of the [elasticsearch client](https://elasticsearch-py.readthedocs.io/) when instantiating the `Search` object: ```python from elasticsearch import Elasticsearch from elasticsearch.dsl import Search client = Elasticsearch() s = Search(using=client) ``` You can also define the client at a later time (for more options see the `configuration` chapter): ```python s = s.using(client) ``` ::::{note} All methods return a *copy* of the object, making it safe to pass to outside code. :::: The API is chainable, allowing you to combine multiple method calls in one statement: ```python s = Search().using(client).query(Match("title", "python")) ``` To send the request to Elasticsearch: ```python response = s.execute() ``` If you just want to iterate over the hits returned by your search you can iterate over the `Search` object: ```python for hit in s: print(hit.title) ``` Search results will be cached. Subsequent calls to `execute` or trying to iterate over an already executed `Search` object will not trigger additional requests being sent to Elasticsearch. To force a new request to be issued specify `ignore_cache=True` when calling `execute`. For debugging purposes you can serialize the `Search` object to a `dict` with the raw Elasticsearch request: ```python print(s.to_dict()) ``` #### Delete By Query [_delete_by_query] You can delete the documents matching a search by calling `delete` on the `Search` object instead of `execute` like this: ```python s = Search(index='i').query(Match("title", "python")) response = s.delete() ``` To pass [deletion parameters](https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query) in your query, you can add them by calling ``params`` on the ``Search`` object before ``delete`` like this: ```python s = Search(index='i').query("match", title="python") s = s.params(ignore_unavailable=False, wait_for_completion=True) response = s.delete() ``` #### Queries [_queries] The `elasticsearch.dsl.query` module provides classes for all Elasticsearch query types. These classes accept keyword arguments in their constructors, which are serialized to the appropriate format to be sent to Elasticsearch. There is a clear one-to-one mapping between the raw query and its equivalent class-based version: ```python >>> from elasticsearch.dsl.query import MultiMatch, Match >>> q = MultiMatch(query='python django', fields=['title', 'body']) >>> q.to_dict() {'multi_match': {'query': 'python django', 'fields': ['title', 'body']}} >>> q = Match("title", {"query": "web framework", "type": "phrase"}) >>> q.to_dict() {'match': {'title': {'query': 'web framework', 'type': 'phrase'}}} ``` An alternative to the class-based queries is to use the `Q` shortcut, passing a query name followed by its parameters, or the raw query as a `dict`: ```python from elasticsearch.dsl import Q Q("multi_match", query='python django', fields=['title', 'body']) Q({"multi_match": {"query": "python django", "fields": ["title", "body"]}}) ``` To add a query to the `Search` object, use the `.query()` method. 
This works with class-based or `Q` queries: ```python q = Q("multi_match", query='python django', fields=['title', 'body']) s = s.query(q) ``` As a shortcut the `query()` method also accepts all the parameters of the `Q` shortcut directly: ```python s = s.query("multi_match", query='python django', fields=['title', 'body']) ``` If you already have a query object, or a `dict` representing one, you can assign it to the `query` attribute of a `Search` object to add it to it, replacing any previously configured queries: ```python s.query = Q('bool', must=[Q('match', title='python'), Q('match', body='best')]) ``` #### Dotted fields [_dotted_fields] Sometimes you want to refer to a field within another field, either as a multi-field (`title.keyword`) or in a structured `json` document like `address.city`. This is not a problem when using class-based queries, but when working without classes it is often required to pass field names as keyword arguments. To make this easier, you can use `__` (double underscore) in place of a dot in a keyword argument: ```python s = Search() s = s.filter('term', category__keyword='Python') s = s.query('match', address__city='prague') ``` Alternatively you can use Python’s keyword argument unpacking: ```python s = Search() s = s.filter('term', **{'category.keyword': 'Python'}) s = s.query('match', **{'address.city': 'prague'}) ``` #### Query combination [_query_combination] Query objects can be combined using logical operators `|`, `&` and `~`: ```python >>> q = Match("title", "python") | Match("title", "django") >>> q.to_dict() {'bool': {'should': [{'match': {'title': 'python'}}, {'match': {'title': 'django'}}]}} >>> q = Match("title", "python") & Match("title", "django") >>> q.to_dict() {'bool': {'must': [{'match': {'title': 'python'}}, {'match': {'title': 'django'}}]}} >>> q = ~Match("title", "python") >>> q.to_dict() {'bool': {'must_not': [{'match': {'title': 'python'}}]}} ``` When you call the `.query()` method multiple times, the `&` operator will be used internally to combine all the queries: ```python s = s.query().query() print(s.to_dict()) # {"query": {"bool": {...}}} ``` If you want to have precise control over the query form, use the `Q` shortcut to directly construct the combined query: ```python q = Q('bool', must=[Q('match', title='python')], should=[Q(...), Q(...)], minimum_should_match=1 ) s = Search().query(q) ``` #### Filters [_filters] If you want to add a query in a [filter context](docs-content://explore-analyze/query-filter/languages/querydsl.md) you can use the `filter()` method to make things easier: ```python from elasticsearch.dsl.query import Terms s = Search() s = s.filter(Terms("tags", ['search', 'python'])) ``` Behind the scenes this will produce a `Bool` query and place the specified `terms` query into its `filter` branch, making it equivalent to: ```python from elasticsearch.dsl.query import Terms, Bool s = Search() s = s.query(Bool(filter=[Terms("tags", ["search", "python"])])) ``` If you want to use the `post_filter` element for faceted navigation, use the `.post_filter()` method. 
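It accepts the same kinds of arguments as `filter()`; the difference is that a post filter narrows only the hits that are returned, after aggregations have been computed (a minimal sketch reusing the `Terms` query class imported above):

```python
s = Search()
s = s.post_filter(Terms("tags", ["search", "python"]))
```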
The `exclude()` method works like `filter()`, but it applies the query as negated: ```python s = Search() s = s.exclude(Terms("tags", ['search', 'python'])) ``` which is shorthand for: ```python s = s.query(Bool(filter=[~Terms("tags", ["search", "python"])])) ``` #### Aggregations [_aggregations] As with queries, there are classes that represent each aggregation type, all accessible through the `elasticsearch.dsl.aggs` module: ```python from elasticsearch.dsl import aggs a = aggs.Terms(field="tags") # {"terms": {"field": "tags"}} ``` It is also possible to define an aggregation using the `A` shortcut: ```python from elasticsearch.dsl import A A('terms', field='tags') ``` To nest aggregations, you can use the `.bucket()`, `.metric()` and `.pipeline()` methods: ```python a = aggs.Terms(field="category") # {'terms': {'field': 'category'}} a.metric("clicks_per_category", aggs.Sum(field="clicks")) \ .bucket("tags_per_category", aggs.Terms(field="tags")) # { # 'terms': {'field': 'category'}, # 'aggs': { # 'clicks_per_category': {'sum': {'field': 'clicks'}}, # 'tags_per_category': {'terms': {'field': 'tags'}} # } # } ``` To add aggregations to the `Search` object, use the `.aggs` property, which acts as a top-level aggregation: ```python s = Search() a = aggs.Terms(field="category") s.aggs.bucket("category_terms", a) # { # 'aggs': { # 'category_terms': { # 'terms': { # 'field': 'category' # } # } # } # } ``` or ```python s = Search() s.aggs.bucket("articles_per_day", aggs.DateHistogram(field="publish_date", interval="day")) \ .metric("clicks_per_day", aggs.Sum(field="clicks")) \ .pipeline("moving_click_average", aggs.MovingAvg(buckets_path="clicks_per_day")) \ .bucket("tags_per_day", aggs.Terms(field="tags")) s.to_dict() # { # "aggs": { # "articles_per_day": { # "date_histogram": { "interval": "day", "field": "publish_date" }, # "aggs": { # "clicks_per_day": { "sum": { "field": "clicks" } }, # "moving_click_average": { "moving_avg": { "buckets_path": "clicks_per_day" } }, # "tags_per_day": { "terms": { "field": "tags" } } # } # } # } # } ``` You can access an existing bucket by its name: ```python s = Search() s.aggs.bucket("per_category", aggs.Terms(field="category")) s.aggs["per_category"].metric("clicks_per_category", aggs.Sum(field="clicks")) s.aggs["per_category"].bucket("tags_per_category", aggs.Terms(field="tags")) ``` ::::{note} When chaining multiple aggregations, there is a difference between what `.bucket()` and `.metric()` methods return - `.bucket()` returns the newly defined bucket while `.metric()` returns its parent bucket to allow further chaining. :::: As opposed to other methods on the `Search` objects, aggregations are defined in-place, without returning a new copy. #### K-Nearest Neighbor Searches [_k_nearest_neighbor_searches] To issue a kNN search, use the `.knn()` method: ```python s = Search() vector = get_embedding("search text") s = s.knn( field="embedding", k=5, num_candidates=10, query_vector=vector ) ``` The `field`, `k` and `num_candidates` arguments can be given as positional or keyword arguments and are required. In addition to these, `query_vector` or `query_vector_builder` must be given as well. The `.knn()` method can be invoked multiple times to include multiple kNN searches in the request. #### Sorting [_sorting] To specify sorting order, use the `.sort()` method: ```python s = Search().sort( 'category', '-title', {"lines" : {"order" : "asc", "mode" : "avg"}} ) ``` It accepts positional arguments which can be either strings or dictionaries. 
String value is a field name, optionally prefixed by the `-` sign to specify a descending order. To reset the sorting, just call the method with no arguments: ```python s = s.sort() ``` #### Pagination [_pagination] To specify the from/size parameters, apply the standard Python slicing operator on the `Search` instance: ```python s = s[10:20] # {"from": 10, "size": 10} s = s[:20] # {"size": 20} s = s[10:] # {"from": 10} s = s[10:20][2:] # {"from": 12, "size": 8} ``` If you want to access all the documents matched by your query you can use the `scan` method which uses the scan/scroll elasticsearch API: ```python for hit in s.scan(): print(hit.title) ``` Note that in this case the results won’t be sorted. #### Highlighting [_highlighting] To set common attributes for highlighting use the `highlight_options` method: ```python s = s.highlight_options(order='score') ``` Enabling highlighting for individual fields is done using the `highlight` method: ```python s = s.highlight('title') # or, including parameters: s = s.highlight('title', fragment_size=50) ``` The fragments in the response will then be available on each `Result` object as `.meta.highlight.FIELD` which will contain the list of fragments: ```python response = s.execute() for hit in response: for fragment in hit.meta.highlight.title: print(fragment) ``` #### Suggestions [_suggestions] To specify a suggest request on your `Search` object use the `suggest` method: ```python # check for correct spelling s = s.suggest('my_suggestion', 'pyhton', term={'field': 'title'}) ``` The first argument is the name of the suggestions (name under which it will be returned), second is the actual text you wish the suggester to work on and the keyword arguments will be added to the suggest’s json as-is which means that it should be one of `term`, `phrase` or `completion` to indicate which type of suggester should be used. #### Collapsing [_collapsing] To collapse search results use the `collapse` method on your `Search` object: ```python s = Search().query(Match("message", "GET /search")) # collapse results by user_id s = s.collapse("user_id") ``` The top hits will only include one result per `user_id`. You can also expand each collapsed top hit with the `inner_hits` parameter, `max_concurrent_group_searches` being the number of concurrent requests allowed to retrieve the inner hits per group: ```python inner_hits = {"name": "recent_search", "size": 5, "sort": [{"@timestamp": "desc"}]} s = s.collapse("user_id", inner_hits=inner_hits, max_concurrent_group_searches=4) ``` #### More Like This Query [_more_like_this_query] To use Elasticsearch’s `more_like_this` functionality, you can use the MoreLikeThis query type. A simple example is below ```python from elasticsearch.dsl.query import MoreLikeThis from elasticsearch.dsl import Search my_text = 'I want to find something similar' s = Search() # We're going to match based only on two fields, in this case text and title s = s.query(MoreLikeThis(like=my_text, fields=['text', 'title'])) # You can also exclude fields from the result to make the response quicker in the normal way s = s.source(exclude=["text"]) response = s.execute() for hit in response: print(hit.title) ``` #### Extra properties and parameters [_extra_properties_and_parameters] To set extra properties of the search request, use the `.extra()` method. 
This can be used to define keys in the body that cannot be defined via a specific API method like `explain` or `search_after`: ```python s = s.extra(explain=True) ``` To set query parameters, use the `.params()` method: ```python s = s.params(routing="42") ``` If you need to limit the fields being returned by elasticsearch, use the `source()` method: ```python # only return the selected fields s = s.source(['title', 'body']) # don't return any fields, just the metadata s = s.source(False) # explicitly include/exclude fields s = s.source(includes=["title"], excludes=["user.*"]) # reset the field selection s = s.source(None) ``` #### Serialization and Deserialization [_serialization_and_deserialization] The search object can be serialized into a dictionary by using the `.to_dict()` method. You can also create a `Search` object from a `dict` using the `from_dict` class method. This will create a new `Search` object and populate it using the data from the dict: ```python s = Search.from_dict({"query": {"match": {"title": "python"}}}) ``` If you wish to modify an existing `Search` object, overriding it’s properties, instead use the `update_from_dict` method that alters an instance **in-place**: ```python s = Search(index='i') s.update_from_dict({"query": {"match": {"title": "python"}}, "size": 42}) ``` ### Response [_response] You can execute your search by calling the `.execute()` method that will return a `Response` object. The `Response` object allows you access to any key from the response dictionary via attribute access. It also provides some convenient helpers: ```python response = s.execute() print(response.success()) # True print(response.took) # 12 print(response.hits.total.relation) # eq print(response.hits.total.value) # 142 print(response.suggest.my_suggestions) ``` If you want to inspect the contents of the `response` objects, just use its `to_dict` method to get access to the raw data for pretty printing. #### Hits [_hits] To access the hits returned by the search, use the `hits` property or just iterate over the `Response` object: ```python response = s.execute() print(f"Total {response.hits.total} hits found.") for h in response: print(h.title, h.body) ``` ::::{note} If you are only seeing partial results (e.g. 10000 or even 10 results), consider using the option `s.extra(track_total_hits=True)` to get a full hit count. :::: #### Result [_result] The individual hits is wrapped in a convenience class that allows attribute access to the keys in the returned dictionary. All the metadata for the results are accessible via `meta` (without the leading `_`): ```python response = s.execute() h = response.hits[0] print(f"/{h.meta.index}/{h.meta.doc_type}/{h.meta.id} returned with score {h.meta.score}") ``` ::::{note} If your document has a field called `meta` you have to access it using the get item syntax: `hit['meta']`. 
:::: #### Aggregations [_aggregations_2] Aggregations are available through the `aggregations` property: ```python for tag in response.aggregations.per_tag.buckets: print(tag.key, tag.max_lines.value) ``` ### `MultiSearch` [_multisearch] If you need to execute multiple searches at the same time you can use the `MultiSearch` class which will use the `_msearch` API: ```python from elasticsearch.dsl import MultiSearch, Search from elasticsearch.dsl.query import Term ms = MultiSearch(index='blogs') ms = ms.add(Search().filter(Term("tags", "python"))) ms = ms.add(Search().filter(Term("tags", 'elasticsearch'))) responses = ms.execute() for response in responses: print("Results for query %r." % response._search.query) for hit in response: print(hit.title) ``` ### `EmptySearch` [_emptysearch] The `EmptySearch` class can be used as a fully compatible version of `Search` that will return no results, regardless of any queries configured. ## Persistence [_persistence_2] You can use the DSL module to define your mappings and a basic persistent layer for your application. For more comprehensive examples have a look at the [DSL examples](https://github.com/elastic/elasticsearch-py/tree/main/examples/dsl) directory in the repository. ### Document [doc_type] If you want to create a model-like wrapper around your documents, use the `Document` class. It can also be used to create all the necessary mappings and settings in elasticsearch (see `life-cycle` for details). ```python from datetime import datetime from elasticsearch.dsl import Document, Date, Nested, Boolean, \ analyzer, InnerDoc, Completion, Keyword, Text html_strip = analyzer('html_strip', tokenizer="standard", filter=["standard", "lowercase", "stop", "snowball"], char_filter=["html_strip"] ) class Comment(InnerDoc): author = Text(fields={'raw': Keyword()}) content = Text(analyzer='snowball') created_at = Date() def age(self): return datetime.now() - self.created_at class Post(Document): title = Text() title_suggest = Completion() created_at = Date() published = Boolean() category = Text( analyzer=html_strip, fields={'raw': Keyword()} ) comments = Nested(Comment) class Index: name = 'blog' def add_comment(self, author, content): self.comments.append( Comment(author=author, content=content, created_at=datetime.now())) def save(self, ** kwargs): self.created_at = datetime.now() return super().save(** kwargs) ``` #### Data types [_data_types] The `Document` instances use native python types such as `str` and `datetime` for its attributes. In case of `Object` or `Nested` fields an instance of the `InnerDoc` subclass is used, as in the `add_comment` method in the above example, where we are creating an instance of the `Comment` class. 
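For example, an instance of the `Post` document defined earlier can be built from plain Python values and persisted (a minimal sketch with illustrative values; it assumes a default connection has been configured):

```python
# field values are plain Python types; comments are added via the helper method
post = Post(title='Search in Python', published=True)
post.add_comment('jane', 'Nice overview!')
post.save()
```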
There are some specific types that were created to make working with some field types easier, for example the `Range` object used in any of the [range fields](elasticsearch://reference/elasticsearch/mapping-reference/range.md):

```python
from datetime import datetime
from elasticsearch.dsl import Document, DateRange, Keyword, Range

class RoomBooking(Document):
    room = Keyword()
    dates = DateRange()


rb = RoomBooking(
  room='Conference Room II',
  dates=Range(
    gte=datetime(2018, 11, 17, 9, 0, 0),
    lt=datetime(2018, 11, 17, 10, 0, 0)
  )
)

# Range supports the in operator correctly:
datetime(2018, 11, 17, 9, 30, 0) in rb.dates # True

# you can also get the limits and whether they are inclusive or exclusive:
rb.dates.lower # datetime(2018, 11, 17, 9, 0, 0), True
rb.dates.upper # datetime(2018, 11, 17, 10, 0, 0), False

# empty range is unbounded
Range().lower # None, False
```

#### Python Type Hints [_python_type_hints]

Document fields can be defined using standard Python type hints if desired. Here are some simple examples:

```python
from typing import Optional

class Post(Document):
    title: str                      # same as title = Text(required=True)
    created_at: Optional[datetime]  # same as created_at = Date(required=False)
    published: bool                 # same as published = Boolean(required=True)
```

It is important to note that when using `Field` subclasses such as `Text`, `Date` and `Boolean`, they must be given on the right side of an assignment, as shown in the examples above. Using these classes as type hints will result in errors.

Python types are mapped to their corresponding field types according to the following table:

| Python type | DSL field |
| --- | --- |
| `str` | `Text(required=True)` |
| `bool` | `Boolean(required=True)` |
| `int` | `Integer(required=True)` |
| `float` | `Float(required=True)` |
| `bytes` | `Binary(required=True)` |
| `datetime` | `Date(required=True)` |
| `date` | `Date(format="yyyy-MM-dd", required=True)` |

To type a field as optional, the standard `Optional` modifier from the Python `typing` package can be used. When using Python 3.10 or newer, "pipe" syntax can also be used, by adding `| None` to a type. The `List` modifier can be added to a field to convert it to an array, similar to using the `multi=True` argument on the field object.

```python
from typing import Optional, List

class MyDoc(Document):
    pub_date: Optional[datetime]  # same as pub_date = Date()
    middle_name: str | None       # same as middle_name = Text()
    authors: List[str]            # same as authors = Text(multi=True, required=True)
    comments: Optional[List[str]] # same as comments = Text(multi=True)
```

A field can also be given a type hint of an `InnerDoc` subclass, in which case it becomes an `Object` field of that class. When the `InnerDoc` subclass is wrapped with `List`, a `Nested` field is created instead.

```python
from typing import List

class Address(InnerDoc):
    ...

class Comment(InnerDoc):
    ...

class Post(Document):
    address: Address         # same as address = Object(Address, required=True)
    comments: List[Comment]  # same as comments = Nested(Comment, required=True)
```

Unfortunately it is impossible to have Python type hints that uniquely identify every possible Elasticsearch field type. To choose a field type that is different from the one that is assigned according to the table above, the desired field instance can be added explicitly as a right-side assignment in the field declaration.
The next example creates a field that is typed as `Optional[str]`, but is mapped to `Keyword` instead of `Text`: ```python class MyDocument(Document): category: Optional[str] = Keyword() ``` This form can also be used when additional options need to be given to initialize the field, such as when using custom analyzer settings: ```python class Comment(InnerDoc): content: str = Text(analyzer='snowball') ``` When using type hints as above, subclasses of `Document` and `InnerDoc` inherit some of the behaviors associated with Python dataclasses, as defined by [PEP 681](https://peps.python.org/pep-0681/) and the [dataclass_transform decorator](https://typing.readthedocs.io/en/latest/spec/dataclasses.html#dataclass-transform). To add per-field dataclass options such as `default` or `default_factory`, the `mapped_field()` wrapper can be used on the right side of a typed field declaration: ```python class MyDocument(Document): title: str = mapped_field(default="no title") created_at: datetime = mapped_field(default_factory=datetime.now) published: bool = mapped_field(default=False) category: str = mapped_field(Keyword(), default="general") ``` When using the `mapped_field()` wrapper function, an explicit field type instance can be passed as a first positional argument, as the `category` field does in the example above. Static type checkers such as [mypy](https://mypy-lang.org/) and [pyright](https://github.com/microsoft/pyright) can use the type hints and the dataclass-specific options added to the `mapped_field()` function to improve type inference and provide better real-time code completion and suggestions in IDEs. One situation in which type checkers can’t infer the correct type is when using fields as class attributes. Consider the following example: ```python class MyDocument(Document): title: str doc = MyDocument() # doc.title is typed as "str" (correct) # MyDocument.title is also typed as "str" (incorrect) ``` To help type checkers correctly identify class attributes as such, the `M` generic must be used as a wrapper to the type hint, as shown in the next examples: ```python from elasticsearch.dsl import M class MyDocument(Document): title: M[str] created_at: M[datetime] = mapped_field(default_factory=datetime.now) doc = MyDocument() # doc.title is typed as "str" # doc.created_at is typed as "datetime" # MyDocument.title is typed as "InstrumentedField" # MyDocument.created_at is typed as "InstrumentedField" ``` Note that the `M` type hint does not provide any runtime behavior and its use is not required, but it can be useful to eliminate spurious type errors in IDEs or type checking builds. The `InstrumentedField` objects returned when fields are accessed as class attributes are proxies for the field instances that can be used anywhere a field needs to be referenced, such as when specifying sort options in a `Search` object: ```python # sort by creation date descending, and title ascending s = MyDocument.search().sort(-MyDocument.created_at, MyDocument.title) ``` When specifying sorting order, the `+` and `-` unary operators can be used on the class field attributes to indicate ascending and descending order. 
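These attribute proxies can also be used when building queries. The following is a short sketch (reusing the `MyDocument` class from above, and assuming query classes accept an `InstrumentedField` anywhere a field name is expected):

```python
from elasticsearch.dsl.query import Match

# class attributes stand in for field names in both queries and sorts
s = (
    MyDocument.search()
    .query(Match(MyDocument.title, "python"))
    .sort(+MyDocument.created_at)  # explicit ascending sort
)
```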
Finally, the `ClassVar` annotation can be used to define a regular class attribute that should not be mapped to the Elasticsearch index:

```python
from typing import ClassVar

class MyDoc(Document):
    title: M[str]
    created_at: M[datetime] = mapped_field(default_factory=datetime.now)
    my_var: ClassVar[str]  # regular class variable, ignored by Elasticsearch
```

#### Note on dates [_note_on_dates]

The DSL module will always respect the timezone information (or lack thereof) on the `datetime` objects passed in or stored in Elasticsearch. Elasticsearch itself interprets all datetimes with no timezone information as `UTC`. If you wish to reflect this in your python code, you can specify `default_timezone` when instantiating a `Date` field:

```python
class Post(Document):
    created_at = Date(default_timezone='UTC')
```

In that case any `datetime` object passed in (or parsed from elasticsearch) will be treated as if it were in `UTC` timezone.

#### Document life cycle [life-cycle]

Before you first use the `Post` document type, you need to create the mappings in Elasticsearch. For that you can either use the `index` object or create the mappings directly by calling the `init` class method:

```python
# create the mappings in Elasticsearch
Post.init()
```

This code will typically be run in the setup for your application during a code deploy, similar to running database migrations.

To create a new `Post` document just instantiate the class and pass in any fields you wish to set. You can then use standard attribute setting to change/add more fields. Note that you are not limited to the fields defined explicitly:

```python
# instantiate the document
first = Post(title='My First Blog Post, yay!', published=True)
# assign some field values, can be values or lists of values
first.category = ['everything', 'nothing']
# every document has an id in meta
first.meta.id = 47

# save the document into the cluster
first.save()
```

All the metadata fields (`id`, `routing`, `index` etc.) can be accessed (and set) via a `meta` attribute or directly using the underscored variant:

```python
post = Post(meta={'id': 42})

# prints 42
print(post.meta.id)

# override default index
post.meta.index = 'my-blog'
```

::::{note}
Having all metadata accessible through `meta` means that this name is reserved and you shouldn’t have a field called `meta` on your document. If you, however, need it you can still access the data using the get item (as opposed to attribute) syntax: `post['meta']`.
::::

To retrieve an existing document use the `get` class method:

```python
# retrieve the document
first = Post.get(id=42)
# now we can call methods, change fields, ...
first.add_comment('me', 'This is nice!')
# and save the changes into the cluster again
first.save()
```

The [Update API](https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-document) can also be used via the `update` method. By default any keyword arguments, beyond the parameters of the API, will be considered fields with new values.
Those fields will be updated on the local copy of the document and then sent over as a partial document to be updated:

```python
# retrieve the document
first = Post.get(id=42)
# you can update just individual fields which will call the update API
# and also update the document in place
first.update(published=True, published_by='me')
```

In case you wish to use a `painless` script to perform the update you can pass in the script string as `script` or the `id` of a [stored script](docs-content://explore-analyze/scripting/modules-scripting-using.md#script-stored-scripts) via `script_id`. All additional keyword arguments to the `update` method will then be passed in as parameters of the script. The document will not be updated in place.

```python
# retrieve the document
first = Post.get(id=42)
# we execute a script in elasticsearch with additional kwargs being passed
# as params into the script
first.update(script='ctx._source.category.add(params.new_category)',
             new_category='testing')
```

If the document is not found in elasticsearch an exception (`elasticsearch.NotFoundError`) will be raised. If you wish to return `None` instead just pass in `ignore=404` to suppress the exception:

```python
p = Post.get(id='not-in-es', ignore=404)
p is None
```

When you wish to retrieve multiple documents at the same time by their `id` you can use the `mget` method:

```python
posts = Post.mget([42, 47, 256])
```

`mget` will, by default, raise a `NotFoundError` if any of the documents wasn’t found, and a `RequestError` if any of the documents resulted in an error. You can control this behavior by setting parameters:

* `raise_on_error`: If `True` (default) then any error will cause an exception to be raised. Otherwise all documents containing errors will be treated as missing.
* `missing`: Can have three possible values: `'none'` (default), `'raise'` and `'skip'`. If a document is missing or errored it will either be replaced with `None`, an exception will be raised or the document will be skipped in the output list entirely.

The index associated with the `Document` is accessible via the `_index` class property which gives you access to the `index` class.

The `_index` attribute is also home to the `load_mappings` method which will update the mapping on the `Index` from elasticsearch. This is very useful if you use dynamic mappings and want the class to be aware of those fields (for example if you wish the `Date` fields to be properly (de)serialized):

```python
Post._index.load_mappings()
```

To delete a document just call its `delete` method:

```python
first = Post.get(id=42)
first.delete()
```

#### Analysis [_analysis]

To specify `analyzer` values for `Text` fields you can just use the name of the analyzer (as a string) and either rely on the analyzer being defined (like built-in analyzers) or define the analyzer yourself manually.

Alternatively you can create your own analyzer and have the persistence layer handle its creation, from our example earlier:

```python
from elasticsearch.dsl import analyzer, tokenizer

my_analyzer = analyzer('my_analyzer',
    tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3),
    filter=['lowercase']
)
```

Each analysis object needs to have a name (`my_analyzer` and `trigram` in our example) and tokenizers, token filters and char filters also need to specify type (`nGram` in our example).
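Once defined, such an analyzer can be attached to a field so that the persistence layer creates it along with the mappings. This is a minimal sketch, reusing `my_analyzer` from the block above with a hypothetical `Article` document:

```python
from elasticsearch.dsl import Document, Text

class Article(Document):
    # `my_analyzer` (and its `trigram` tokenizer) will be created in
    # Elasticsearch when Article.init() sets up the index mappings
    title = Text(analyzer=my_analyzer)

    class Index:
        name = 'articles'
```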
Once you have an instance of a custom `analyzer` you can also call the [analyze API](https://www.elastic.co/docs/api/doc/elasticsearch/v8/group/endpoint-indices) on it by using the `simulate` method:

```python
response = my_analyzer.simulate('Hello World!')

# ['hel', 'ell', 'llo', 'lo ', 'o w', ' wo', 'wor', 'orl', 'rld', 'ld!']
tokens = [t.token for t in response.tokens]
```

::::{note}
When creating a mapping which relies on a custom analyzer the index must either not exist or be closed. To create multiple `Document`-defined mappings you can use the `index` object.
::::

#### Search [_search_2]

To search for this document type, use the `search` class method:

```python
# by calling .search we get back a standard Search object
s = Post.search()
# the search is already limited to the index and doc_type of our document
s = s.filter('term', published=True).query('match', title='first')

results = s.execute()

# when you execute the search the results are wrapped in your document class (Post)
for post in results:
    print(post.meta.score, post.title)
```

Alternatively you can just take a `Search` object and restrict it to return our document type, wrapped in the correct class:

```python
s = Search()
s = s.doc_type(Post)
```

You can also combine document classes with standard doc types (just strings), which will be treated as before. You can also pass in multiple `Document` subclasses and each document in the response will be wrapped in its class.

If you want to run suggestions, just use the `suggest` method on the `Search` object:

```python
s = Post.search()
s = s.suggest('title_suggestions', 'pyth', completion={'field': 'title_suggest'})

response = s.execute()

for result in response.suggest.title_suggestions:
    print('Suggestions for %s:' % result.text)
    for option in result.options:
        print('  %s (%r)' % (option.text, option.payload))
```

#### `class Meta` options [_class_meta_options]

In the `Meta` class inside your document definition you can define various metadata for your document:

* `mapping`: optional instance of `Mapping` class to use as base for the mappings created from the fields on the document class itself.

Any attributes on the `Meta` class that are instances of `MetaField` will be used to control the mapping of the meta fields (`_all`, `dynamic` etc.). Just name the parameter (without the leading underscore) as the field you wish to map and pass any parameters to the `MetaField` class:

```python
class Post(Document):
    title = Text()

    class Meta:
        all = MetaField(enabled=False)
        dynamic = MetaField('strict')
```

#### `class Index` options [_class_index_options]

This section of the `Document` definition can contain any information about the index, its name, settings and other attributes:

* `name`: name of the index to use, if it contains a wildcard (`*`) then it cannot be used for any write operations and an `index` kwarg will have to be passed explicitly when calling methods like `.save()`.
* `using`: default connection alias to use, defaults to `'default'`
* `settings`: dictionary containing any settings for the `Index` object like `number_of_shards`.
* `analyzers`: additional list of analyzers that should be defined on an index (see `analysis` for details).
* `aliases`: dictionary with any alias definitions

#### Document Inheritance [_document_inheritance]

You can use standard Python inheritance to extend models; this can be useful in a few scenarios.
For example if you want to have a `BaseDocument` defining some common fields that several different `Document` classes should share:

```python
class User(InnerDoc):
    username: str = mapped_field(Text(fields={'keyword': Keyword()}))
    email: str

class BaseDocument(Document):
    created_by: User
    created_date: datetime
    last_updated: datetime

    def save(self, **kwargs):
        if not self.created_date:
            self.created_date = datetime.now()
        self.last_updated = datetime.now()
        return super(BaseDocument, self).save(**kwargs)

class BlogPost(BaseDocument):
    class Index:
        name = 'blog'
```

Another use case would be using the [join type](elasticsearch://reference/elasticsearch/mapping-reference/parent-join.md) to have multiple different entities in a single index. You can see an [example](https://github.com/elastic/elasticsearch-py/blob/master/examples/dsl/parent_child.py) of this approach.

Note that in this case, if the subclasses don’t define their own Index classes, the mappings are merged and shared between all the subclasses.

### Index [_index]

In a typical scenario, using `class Index` on a `Document` class is sufficient to perform any action. In a few cases though it can be useful to manipulate an `Index` object directly.

`Index` is a class responsible for holding all the metadata related to an index in elasticsearch - mappings and settings. It is most useful when defining your mappings since it allows for easy creation of multiple mappings at the same time. This is especially useful when setting up your elasticsearch objects in a migration:

```python
from elasticsearch.dsl import Index, Document, Text, analyzer

blogs = Index('blogs')

# define custom settings
blogs.settings(
    number_of_shards=1,
    number_of_replicas=0
)

# define aliases
blogs.aliases(
    old_blogs={}
)

# register a document with the index
blogs.document(Post)

# can also be used as class decorator when defining the Document
@blogs.document
class Post(Document):
    title: str

# You can attach custom analyzers to the index

html_strip = analyzer('html_strip',
    tokenizer="standard",
    filter=["standard", "lowercase", "stop", "snowball"],
    char_filter=["html_strip"]
)

blogs.analyzer(html_strip)

# delete the index, ignore if it doesn't exist
blogs.delete(ignore=404)

# create the index in elasticsearch
blogs.create()
```

You can also set up a template for your indices and use the `clone` method to create specific copies:

```python
blogs = Index('blogs', using='production')
blogs.settings(number_of_shards=2)
blogs.document(Post)

# create a copy of the index with different name
company_blogs = blogs.clone('company-blogs')

# create a different copy on different cluster
dev_blogs = blogs.clone('blogs', using='dev')
# and change its settings
dev_blogs.settings(number_of_shards=1)
```

#### IndexTemplate [index-template]

The DSL module also exposes an option to manage [index templates](docs-content://manage-data/data-store/templates.md) in elasticsearch using the `ComposableIndexTemplate` and `IndexTemplate` classes, which have a very similar API to `Index`.

::::{note}
Composable index templates should always be preferred over the legacy index templates, since the latter are deprecated.
::::

Once an index template is saved in Elasticsearch its contents will be automatically applied to new indices (existing indices are completely unaffected by templates) that match the template pattern (any index starting with `blogs-` in our example), even if the index is created automatically upon indexing a document into that index.
A potential workflow for a set of time-based indices governed by a single template:

```python
from datetime import datetime

from elasticsearch.dsl import Document, Date, Text


class Log(Document):
    content: str
    timestamp: datetime

    class Index:
        name = "logs-*"

    def save(self, **kwargs):
        # assign now if no timestamp given
        if not self.timestamp:
            self.timestamp = datetime.now()

        # override the index to go to the proper timeslot
        kwargs['index'] = self.timestamp.strftime('logs-%Y%m%d')
        return super().save(**kwargs)

# once, as part of application setup, during deploy/migrations:
logs = Log._index.as_composable_template('logs', priority=100)
logs.save()

# to perform search across all logs:
search = Log.search()
```

## Faceted Search [faceted_search]

The library comes with a simple abstraction aimed at helping you develop faceted navigation for your data.

### Configuration [_configuration_2]

You can provide several configuration options (as class attributes) when declaring a `FacetedSearch` subclass:

* `index`: the name of the index (as string) to search through, defaults to `'_all'`.
* `doc_types`: list of `Document` subclasses or strings to be used, defaults to `['_all']`.
* `fields`: list of fields on the document type to search through. The list will be passed to a `MultiMatch` query so can contain boost values (`'title^5'`), defaults to `['*']`.
* `facets`: dictionary of facets to display/filter on. The key is the name displayed and values should be instances of any `Facet` subclass, for example: `{'tags': TermsFacet(field='tags')}`

#### Facets [_facets]

There are several different facets available:

* `TermsFacet`: provides an option to split documents into groups based on a value of a field, for example `TermsFacet(field='category')`
* `DateHistogramFacet`: split documents into time intervals, example: `DateHistogramFacet(field="published_date", calendar_interval="day")`
* `HistogramFacet`: similar to `DateHistogramFacet` but for numerical values: `HistogramFacet(field="rating", interval=2)`
* `RangeFacet`: allows you to define your own ranges for numerical fields: `RangeFacet(field="comment_count", ranges=[("few", (None, 2)), ("lots", (2, None))])`
* `NestedFacet`: is just a simple facet that wraps another to provide access to nested documents: `NestedFacet('variants', TermsFacet(field='variants.color'))`

By default facet results will only calculate document count; if you wish for a different metric you can pass in any single value metric aggregation as the `metric` kwarg (`TermsFacet(field='tags', metric=A('max', field='timestamp'))`). When specifying `metric` the results will be, by default, sorted in descending order by that metric. To change it to ascending specify `metric_sort="asc"` and to just sort by document count use `metric_sort=False`.

#### Advanced [_advanced]

If you require any custom behavior or modifications simply override one or more of the methods responsible for the class' functions:

* `search(self)`: is responsible for constructing the `Search` object used. Override this if you want to customize the search object (for example by adding a global filter for published articles only).
* `query(self, search)`: adds the query part of the search (if a search input was specified), by default using a `MultiMatch` query. Override this if you want to modify the query type used.
* `highlight(self, search)`: defines the highlighting on the `Search` object and returns a new one. Default behavior is to highlight on all fields specified for search; see the sketch after this list.
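For instance, here is a minimal sketch of a subclass that narrows highlighting down to a single field (the `ArticleSearch` class and its fields are hypothetical):

```python
from elasticsearch.dsl import FacetedSearch, TermsFacet

class ArticleSearch(FacetedSearch):
    fields = ['title^5', 'body']
    facets = {'tags': TermsFacet(field='tags')}

    def highlight(self, search):
        # only highlight the body field instead of every searched field
        return search.highlight('body', fragment_size=50)
```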
### Usage [_usage]

The custom subclass can be instantiated empty to provide an empty search (matching everything) or with `query`, `filters` and `sort`.

* `query`: is used to pass in the text of the query to be performed, for example `'python web'`. If `None` is passed in (default) a `MatchAll` query will be used.
* `filters`: is a dictionary containing all the facet filters that you wish to apply. Use the name of the facet (from `.facets` attribute) as the key and one of the possible values as value. For example `{'tags': 'python'}`.
* `sort`: is a tuple or list of fields on which the results should be sorted. The format of the individual fields is the same as those passed to `Search.sort()`.

#### Response [_response_2]

The response returned from the `FacetedSearch` object (by calling `.execute()`) is a subclass of the standard `Response` class that adds a property called `facets` which contains a dictionary with lists of buckets, each represented by a tuple of key, document count and a flag indicating whether this value has been filtered on.

### Example [_example]

```python
from datetime import date

from elasticsearch.dsl import FacetedSearch, TermsFacet, DateHistogramFacet

class BlogSearch(FacetedSearch):
    doc_types = [Article, ]
    # fields that should be searched
    fields = ['tags', 'title', 'body']

    facets = {
        # use bucket aggregations to define facets
        'tags': TermsFacet(field='tags'),
        'publishing_frequency': DateHistogramFacet(field='published_from', interval='month')
    }

    def search(self):
        # override methods to add custom pieces
        s = super().search()
        return s.filter('range', published_from={'lte': 'now/h'})

bs = BlogSearch('python web', {'publishing_frequency': date(2015, 6, 1)})
response = bs.execute()

# access hits and other attributes as usual
total = response.hits.total
print('total hits', total.relation, total.value)
for hit in response:
    print(hit.meta.score, hit.title)

for (tag, count, selected) in response.facets.tags:
    print(tag, ' (SELECTED):' if selected else ':', count)

for (month, count, selected) in response.facets.publishing_frequency:
    print(month.strftime('%B %Y'), ' (SELECTED):' if selected else ':', count)
```

## Update By Query [update_by_query]

### The `Update By Query` object [_the_update_by_query_object]

The `Update By Query` object enables the use of the [_update_by_query](https://www.elastic.co/docs/api/doc/elasticsearch/v8/operation/operation-update-by-query) endpoint to perform an update on documents that match a search query.

The object is implemented as a modification of the `Search` object, containing a subset of its query methods, as well as a script method, which is used to make updates.

The `Update By Query` object implements the following `Search` query types:

* queries
* filters
* excludes

For more information on queries, see the `search_dsl` chapter.

Like the `Search` object, the API is designed to be chainable. This means that the `Update By Query` object is immutable: all changes to the object will result in a shallow copy being created which contains the changes. This means you can safely pass the `Update By Query` object to foreign code without fear of it modifying your objects as long as it sticks to the `Update By Query` object APIs.

You can define your client in a number of ways, but the preferred method is to use a global configuration. For more information on defining a client, see the `configuration` chapter.
Once your client is defined, you can instantiate a copy of the `Update By Query` object as seen below:

```python
from elasticsearch.dsl import UpdateByQuery

ubq = UpdateByQuery().using(client)
# or
ubq = UpdateByQuery(using=client)
```

::::{note}
All methods return a *copy* of the object, making it safe to pass to outside code.
::::

The API is chainable, allowing you to combine multiple method calls in one statement:

```python
ubq = UpdateByQuery().using(client).query(Match("title", "python"))
```

To send the request to Elasticsearch:

```python
response = ubq.execute()
```

It should be noted that there are limits to the chaining using the script method: calling script multiple times will overwrite the previous value. That is, only a single script can be sent with a call. An attempt to use two scripts will result in only the second script being stored.

Given the below example:

```python
ubq = UpdateByQuery() \
    .using(client) \
    .script(source="ctx._source.likes++") \
    .script(source="ctx._source.likes+=2")
```

This means that the stored script by this client will be `'source': 'ctx._source.likes+=2'` and the previous call will not be stored.

For debugging purposes you can serialize the `Update By Query` object to a `dict` explicitly:

```python
print(ubq.to_dict())
```

Also, to use variables in the script, see the below example:

```python
ubq.script(
  source="ctx._source.messages.removeIf(x -> x.somefield == params.some_var)",
  params={
    'some_var': 'some_string_val'
  }
)
```

#### Serialization and Deserialization [_serialization_and_deserialization_2]

The search object can be serialized into a dictionary by using the `.to_dict()` method.

You can also create an `Update By Query` object from a `dict` using the `from_dict` class method. This will create a new `Update By Query` object and populate it using the data from the dict:

```python
ubq = UpdateByQuery.from_dict({"query": {"match": {"title": "python"}}})
```

If you wish to modify an existing `Update By Query` object, overriding its properties, instead use the `update_from_dict` method that alters an instance **in-place**:

```python
ubq = UpdateByQuery(index='i')
ubq.update_from_dict({"query": {"match": {"title": "python"}}, "size": 42})
```

#### Extra properties and parameters [_extra_properties_and_parameters_2]

To set extra properties of the search request, use the `.extra()` method. This can be used to define keys in the body that cannot be defined via a specific API method like `explain`:

```python
ubq = ubq.extra(explain=True)
```

To set query parameters, use the `.params()` method:

```python
ubq = ubq.params(routing="42")
```

### Response [_response_3]

You can execute your search by calling the `.execute()` method that will return a `Response` object. The `Response` object allows you access to any key from the response dictionary via attribute access. It also provides some convenient helpers:

```python
response = ubq.execute()

print(response.success())
# True

print(response.took)
# 12
```

If you want to inspect the contents of the `response` objects, just use its `to_dict` method to get access to the raw data for pretty printing.

## ES|QL Queries

When working with `Document` classes, you can use the ES|QL query language to retrieve documents. For this you can use the `esql_from()` and `esql_execute()` methods available to all sub-classes of `Document`.
Consider the following `Employee` document definition:

```python
from elasticsearch.dsl import Document, InnerDoc, M

class Address(InnerDoc):
    address: M[str]
    city: M[str]
    zip_code: M[str]

class Employee(Document):
    emp_no: M[int]
    first_name: M[str]
    last_name: M[str]
    height: M[float]
    still_hired: M[bool]
    address: M[Address]

    class Index:
        name = 'employees'
```

The `esql_from()` method creates a base ES|QL query for the index associated with the document class. The following example creates a base query for the `Employee` class:

```python
query = Employee.esql_from()
```

This query includes a `FROM` command with the index name, and a `KEEP` command that retrieves all the document attributes.

To execute this query and receive the results, you can pass the query to the `esql_execute()` method:

```python
for emp in Employee.esql_execute(query):
    print(f"{emp.first_name} {emp.last_name} from {emp.address.city} is {emp.height:.2f}m tall")
```

In this example, the `esql_execute()` class method runs the query and returns all the documents in the index, up to the maximum of 1000 results allowed by ES|QL. Here is a possible output from this example:

```
Kevin Macias from North Robert is 1.60m tall
Drew Harris from Boltonshire is 1.68m tall
Julie Williams from Maddoxshire is 1.99m tall
Christopher Jones from Stevenbury is 1.98m tall
Anthony Lopez from Port Sarahtown is 2.42m tall
Tricia Stone from North Sueshire is 2.39m tall
Katherine Ramirez from Kimberlyton is 1.83m tall
...
```

To search for specific documents you can extend the base query with additional ES|QL commands that narrow the search criteria. The next example narrows the search to employees that are taller than 2 meters, sorted by their last name. It also limits the results to 4 people:

```python
query = (
    Employee.esql_from()
    .where(Employee.height > 2)
    .sort(Employee.last_name)
    .limit(4)
)
```

When running this query with the same for-loop shown above, possible results would be:

```
Michael Adkins from North Stacey is 2.48m tall
Kimberly Allen from Toddside is 2.24m tall
Crystal Austin from East Michaelchester is 2.30m tall
Rebecca Berger from Lake Adrianside is 2.40m tall
```

### Additional fields

ES|QL provides a few ways to add new fields to a query, for example through the `EVAL` command. The following example shows a query that adds an evaluated field:

```python
from elasticsearch.esql import E, functions

query = (
    Employee.esql_from()
    .eval(height_cm=functions.round(Employee.height * 100))
    .where(E("height_cm") >= 200)
    .sort(Employee.last_name)
    .limit(10)
)
```

In this example we are adding the height in centimeters to the query, calculated from the `height` document field, which is in meters. The `height_cm` calculated field is available to use in other query clauses, and in particular is referenced in `where()` in this example.

Note how the new field is given as `E("height_cm")` in this clause. The `E()` wrapper tells the query builder that the argument is an ES|QL field name and not a string literal. This is done automatically for document fields that are given as class attributes, such as `Employee.height` in the `eval()`. The `E()` wrapper is only needed for fields that are not in the document.

By default, the `esql_execute()` method returns only document instances.
To receive any additional fields that are not part of the document in the query results, the `return_additional=True` argument can be passed to it, and then the results are returned as tuples with the document as the first element, and a dictionary with the additional fields as the second element:

```python
for emp, additional in Employee.esql_execute(query, return_additional=True):
    print(emp.first_name, emp.last_name, additional)
```

Example output from the query given above:

```
Michael Adkins {'height_cm': 248.0}
Kimberly Allen {'height_cm': 224.0}
Crystal Austin {'height_cm': 230.0}
Rebecca Berger {'height_cm': 240.0}
Katherine Blake {'height_cm': 214.0}
Edward Butler {'height_cm': 246.0}
Steven Carlson {'height_cm': 242.0}
Mark Carter {'height_cm': 240.0}
Joseph Castillo {'height_cm': 229.0}
Alexander Cohen {'height_cm': 245.0}
```

### Missing fields

The base query returned by the `esql_from()` method includes a `KEEP` command with the complete list of fields that are part of the document. If any subsequent clauses added to the query remove fields that are part of the document, then the `esql_execute()` method will raise an exception, because it will not be able to construct complete document instances to return as results. To prevent errors, it is recommended that the `keep()` and `drop()` clauses are not used when working with `Document` instances.

If a query has missing fields, it can be forced to execute without errors by passing the `ignore_missing_fields=True` argument to `esql_execute()`. When this option is used, returned documents will have any missing fields set to `None`.

## Using asyncio with Elasticsearch Python DSL [asyncio]

The DSL module supports async/await with [asyncio](https://docs.python.org/3/library/asyncio.html). To ensure that you have all the required dependencies, install the `[async]` extra:

```bash
$ python -m pip install "elasticsearch[async]"
```

### Connections [_connections]

Use the `async_connections` module to manage your asynchronous connections.

```python
from elasticsearch.dsl import async_connections

async_connections.create_connection(hosts=['localhost'], timeout=20)
```

All the options available in the `connections` module can be used with `async_connections`.

#### How to avoid *Unclosed client session / connector* warnings on exit [_how_to_avoid_unclosed_client_session_connector_warnings_on_exit]

These warnings come from the `aiohttp` package, which is used internally by the `AsyncElasticsearch` client. They often appear when the application exits, and are caused by HTTP connections that are still open when they are garbage collected. To avoid these warnings, make sure that you close your connections.

```python
es = async_connections.get_connection()
await es.close()
```

### Search DSL [_search_dsl]

Use the `AsyncSearch` class to perform asynchronous searches.

```python
from elasticsearch.dsl import AsyncSearch
from elasticsearch.dsl.query import Match

s = AsyncSearch().query(Match("title", "python"))
async for hit in s:
    print(hit.title)
```

Instead of using the `AsyncSearch` object as an asynchronous iterator, you can explicitly call the `execute()` method to get a `Response` object.

```python
s = AsyncSearch().query(Match("title", "python"))
response = await s.execute()
for hit in response:
    print(hit.title)
```

An `AsyncMultiSearch` is available as well.
```python
from elasticsearch.dsl import AsyncMultiSearch
from elasticsearch.dsl.query import Term

ms = AsyncMultiSearch(index='blogs')

ms = ms.add(AsyncSearch().filter(Term("tags", "python")))
ms = ms.add(AsyncSearch().filter(Term("tags", "elasticsearch")))

responses = await ms.execute()

for response in responses:
    print("Results for query %r." % response._search.query)
    for hit in response:
        print(hit.title)
```

### Asynchronous Documents, Indexes, and more [_asynchronous_documents_indexes_and_more]

The `Document`, `Index`, `IndexTemplate`, `Mapping`, `UpdateByQuery` and `FacetedSearch` classes all have asynchronous versions that use the same name with an `Async` prefix. These classes expose the same interfaces as the synchronous versions, but any methods that perform I/O are defined as coroutines. Auxiliary classes that do not perform I/O do not have asynchronous versions. The same classes can be used in synchronous and asynchronous applications.

When using a custom analyzer in an asynchronous application, use the `async_simulate()` method to invoke the Analyze API on it.

Consult the `api` section for details about each specific method.
python-elasticsearch-9.1.1/docs/reference/dsl_migrating.md000066400000000000000000000024061506101734100237060ustar00rootroot00000000000000
# Migrating from the `elasticsearch-dsl` package [_migrating_from_elasticsearch_dsl_package]

In the past the Elasticsearch Python DSL module was distributed as a standalone package called `elasticsearch-dsl`. This package is now deprecated, since all its functionality has been integrated into the main Python client. We recommend that all developers migrate their applications and eliminate their dependency on the `elasticsearch-dsl` package.

To migrate your application, all references to `elasticsearch_dsl` as a top-level package must be changed to `elasticsearch.dsl`. In other words, the underscore from the package name should be replaced by a period.
Here are a few examples:

```python
# from:
from elasticsearch_dsl import Date, Document, InnerDoc, Text, connections
# to:
from elasticsearch.dsl import Date, Document, InnerDoc, Text, connections

# from:
from elasticsearch_dsl.query import MultiMatch
# to:
from elasticsearch.dsl.query import MultiMatch

# from:
import elasticsearch_dsl as dsl
# to:
from elasticsearch import dsl

# from:
import elasticsearch_dsl
# to:
from elasticsearch import dsl as elasticsearch_dsl

# from:
import elasticsearch_dsl
# to:
from elasticsearch import dsl
# and replace all references to "elasticsearch_dsl" in the code with "dsl"
```
python-elasticsearch-9.1.1/docs/reference/dsl_tutorials.md000066400000000000000000000170051506101734100237540ustar00rootroot00000000000000---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/_tutorials.html
---

# Tutorials [_tutorials]

## Search [_search]

Let’s have a typical search request written directly as a `dict`:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("https://localhost:9200")

response = client.search(
    index="my-index",
    body={
        "query": {
            "bool": {
                "must": [{"match": {"title": "python"}}],
                "must_not": [{"match": {"description": "beta"}}],
                "filter": [{"term": {"category": "search"}}]
            }
        },
        "aggs": {
            "per_tag": {
                "terms": {"field": "tags"},
                "aggs": {
                    "max_lines": {"max": {"field": "lines"}}
                }
            }
        }
    }
)

for hit in response['hits']['hits']:
    print(hit['_score'], hit['_source']['title'])

for tag in response['aggregations']['per_tag']['buckets']:
    print(tag['key'], tag['max_lines']['value'])
```

The problem with this approach is that it is very verbose, prone to syntax mistakes like incorrect nesting, hard to modify (e.g. adding another filter) and definitely not fun to write.
Let’s rewrite the example using the DSL module:

```python
from elasticsearch import Elasticsearch
from elasticsearch.dsl import Search, query, aggs

client = Elasticsearch("https://localhost:9200")

s = Search(using=client, index="my-index") \
    .query(query.Match("title", "python"))   \
    .filter(query.Term("category", "search")) \
    .exclude(query.Match("description", "beta"))

s.aggs.bucket('per_tag', aggs.Terms(field="tags")) \
    .metric('max_lines', aggs.Max(field='lines'))

response = s.execute()

for hit in response:
    print(hit.meta.score, hit.title)

for tag in response.aggregations.per_tag.buckets:
    print(tag.key, tag.max_lines.value)
```

As you see, the DSL module took care of:

* creating appropriate `Query` objects from classes
* composing queries into a compound `bool` query
* putting the `term` query in a filter context of the `bool` query
* providing a convenient access to response data
* no curly or square brackets everywhere

## Persistence [_persistence]

Let’s have a simple Python class representing an article in a blogging system:

```python
from datetime import datetime
from elasticsearch.dsl import Document, Date, Integer, Keyword, Text, connections, mapped_field

# Define a default Elasticsearch client
connections.create_connection(hosts="https://localhost:9200")

class Article(Document):
    title: str = mapped_field(Text(analyzer='snowball', fields={'raw': Keyword()}))
    body: str = mapped_field(Text(analyzer='snowball'))
    tags: list[str] = mapped_field(Keyword())
    published_from: datetime
    lines: int

    class Index:
        name = 'blog'
        settings = {
          "number_of_shards": 2,
        }

    def save(self, **kwargs):
        self.lines = len(self.body.split())
        return super(Article, self).save(**kwargs)

    def is_published(self):
        return datetime.now() > self.published_from

# create the mappings in elasticsearch
Article.init()

# create and save an article
article = Article(meta={'id': 42}, title='Hello world!', tags=['test'])
article.body = ''' looong text '''
article.published_from = datetime.now()
article.save()

article = Article.get(id=42)
print(article.is_published())

# Display cluster health
print(connections.get_connection().cluster.health())
```

In this example you can see:

* providing a default connection
* defining fields with Python type hints and additional mapping configuration when necessary
* setting index name
* defining custom methods
* overriding the built-in `.save()` method to hook into the persistence life cycle
* retrieving and saving the object into Elasticsearch
* accessing the underlying client for other APIs

You can see more in the `persistence` chapter.

## Pre-built Faceted Search [_pre_built_faceted_search]

If you have your `Document`s defined you can very easily create a faceted search class to simplify searching and filtering.
```python
from elasticsearch.dsl import FacetedSearch, TermsFacet, DateHistogramFacet

class BlogSearch(FacetedSearch):
    doc_types = [Article, ]
    # fields that should be searched
    fields = ['tags', 'title', 'body']

    facets = {
        # use bucket aggregations to define facets
        'tags': TermsFacet(field='tags'),
        'publishing_frequency': DateHistogramFacet(field='published_from', interval='month')
    }

# empty search
bs = BlogSearch()
response = bs.execute()

for hit in response:
    print(hit.meta.score, hit.title)

for (tag, count, selected) in response.facets.tags:
    print(tag, ' (SELECTED):' if selected else ':', count)

for (month, count, selected) in response.facets.publishing_frequency:
    print(month.strftime('%B %Y'), ' (SELECTED):' if selected else ':', count)
```

You can find more details in the `faceted_search` chapter.

## Update By Query [_update_by_query]

Let’s resume the simple example of articles on a blog, and let’s assume that each article has a number of likes. For this example, imagine we want to increment the number of likes by 1 for all articles that match a certain tag and do not match a certain description. Writing this as a `dict`, we would have the following code:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch()

response = client.update_by_query(
    index="my-index",
    body={
        "query": {
            "bool": {
                "must": [{"match": {"tag": "python"}}],
                "must_not": [{"match": {"description": "beta"}}]
            }
        },
        "script": {
            "source": "ctx._source.likes++",
            "lang": "painless"
        }
    },
)
```

Using the DSL, we can now express this query as such:

```python
from elasticsearch import Elasticsearch
from elasticsearch.dsl import Search, UpdateByQuery
from elasticsearch.dsl.query import Match

client = Elasticsearch()
ubq = UpdateByQuery(using=client, index="my-index") \
      .query(Match("tag", "python")) \
      .exclude(Match("description", "beta")) \
      .script(source="ctx._source.likes++", lang="painless")

response = ubq.execute()
```

As you can see, the `Update By Query` object provides many of the savings offered by the `Search` object, and additionally allows one to update the results of the search based on a script assigned in the same manner.

## ES|QL Queries

The DSL module features an integration with the ES|QL query builder, consisting of two methods available in all `Document` sub-classes: `esql_from()` and `esql_execute()`. Using the `Article` document from above, we can search for up to ten articles that include `"world"` in their titles with the following ES|QL query:

```python
from elasticsearch.esql import functions

query = Article.esql_from().where(functions.match(Article.title, 'world')).limit(10)
for a in Article.esql_execute(query):
    print(a.title)
```

Review the [ES|QL Query Builder section](esql-query-builder.md) to learn more about building ES|QL queries in Python.

## Migration from the standard client [_migration_from_the_standard_client]

You don’t have to port your entire application to get the benefits of the DSL module; you can start gradually by creating a `Search` object from your existing `dict`, modifying it using the API and serializing it back to a `dict`:

```python
body = {...} # insert complicated query here

# Convert to Search object
s = Search.from_dict(body)

# Add some filters, aggregations, queries, ...
s = s.filter(query.Term("tags", "python"))

# Convert back to dict to plug back into existing code
body = s.to_dict()
```
python-elasticsearch-9.1.1/docs/reference/elasticsearch-dsl.md000066400000000000000000000027651506101734100244640ustar00rootroot00000000000000---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/elasticsearch-dsl.html
---

# Elasticsearch Python DSL [elasticsearch-dsl]

Elasticsearch DSL is a module of the official Python client that aims to help with writing and running queries against Elasticsearch in a more convenient and idiomatic way. It stays close to the Elasticsearch JSON DSL, mirroring its terminology and structure. It exposes the whole range of the DSL from Python either directly using defined classes or queryset-like expressions. Here is an example:

```python
from elasticsearch.dsl import Search
from elasticsearch.dsl.query import Match, Term

s = Search(index="my-index") \
    .query(Match("title", "python")) \
    .filter(Term("category", "search")) \
    .exclude(Match("description", "beta"))

for hit in s:
    print(hit.title)
```

Or with asynchronous Python:

```python
from elasticsearch.dsl import AsyncSearch
from elasticsearch.dsl.query import Match, Term

async def run_query():
    s = AsyncSearch(index="my-index") \
        .query(Match("title", "python")) \
        .filter(Term("category", "search")) \
        .exclude(Match("description", "beta"))

    async for hit in s:
        print(hit.title)
```

It also provides an optional wrapper for working with documents as Python objects: defining mappings, retrieving and saving documents, wrapping the document data in user-defined classes.

To use the other Elasticsearch APIs (e.g. cluster health) just use the regular client.
python-elasticsearch-9.1.1/docs/reference/esql-pandas.md000066400000000000000000001245761506101734100233060ustar00rootroot00000000000000---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/esql-pandas.html
---

# ES|QL and Pandas [esql-pandas]

The [Elasticsearch Query Language (ES|QL)](docs-content://explore-analyze/query-filter/languages/esql.md) provides a powerful way to filter, transform, and analyze data stored in {{es}}. Designed to be easy to learn and use, it is a perfect fit for data scientists familiar with Pandas and other dataframe-based libraries. ES|QL queries produce tables with named columns, which is the definition of dataframes.

This page shows you an example of using ES|QL and Pandas together to work with dataframes.

## Import data [import-data]

Use the [`employees` sample data](https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/esql/qa/testFixtures/src/main/resources/employees.csv) and [mapping](https://github.com/elastic/elasticsearch/blob/main/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-default.json). The easiest way to load this dataset is to run [two Elasticsearch API requests](https://gist.github.com/pquentin/7cf29a5932cf52b293699dd994b1a276) in the Kibana Console.
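Both requests are reproduced in the collapsible sections below. Alternatively, the same data can be ingested from Python with the client's bulk helper. This is a minimal sketch, assuming the rows of `employees.csv` have already been parsed into a list of dicts named `docs` and that the index mapping below has been created:

```python
from elasticsearch import Elasticsearch, helpers

client = Elasticsearch("https://localhost:9200")

# send all documents to the `employees` index in a single bulk call
helpers.bulk(client, docs, index="employees")
```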
::::{dropdown} Index mapping request ```console PUT employees { "mappings": { "properties": { "avg_worked_seconds": { "type": "long" }, "birth_date": { "type": "date" }, "emp_no": { "type": "integer" }, "first_name": { "type": "keyword" }, "gender": { "type": "keyword" }, "height": { "type": "double", "fields": { "float": { "type": "float" }, "half_float": { "type": "half_float" }, "scaled_float": { "type": "scaled_float", "scaling_factor": 100 } } }, "hire_date": { "type": "date" }, "is_rehired": { "type": "boolean" }, "job_positions": { "type": "keyword" }, "languages": { "type": "integer", "fields": { "byte": { "type": "byte" }, "long": { "type": "long" }, "short": { "type": "short" } } }, "last_name": { "type": "keyword" }, "salary": { "type": "integer" }, "salary_change": { "type": "double", "fields": { "int": { "type": "integer" }, "keyword": { "type": "keyword" }, "long": { "type": "long" } } }, "still_hired": { "type": "boolean" } } } } ``` :::: ::::{dropdown} Bulk request to ingest data ```console PUT employees/_bulk { "index": {}} {"birth_date":"1953-09-02T00:00:00Z","emp_no":"10001","first_name":"Georgi","gender":"M","hire_date":"1986-06-26T00:00:00Z","languages":"2","last_name":"Facello","salary":"57305","height":"2.03","still_hired":"true","avg_worked_seconds":"268728049","job_positions":["Senior Python Developer","Accountant"],"is_rehired":["false","true"],"salary_change":"1.19"} { "index": {}} {"birth_date":"1964-06-02T00:00:00Z","emp_no":"10002","first_name":"Bezalel","gender":"F","hire_date":"1985-11-21T00:00:00Z","languages":"5","last_name":"Simmel","salary":"56371","height":"2.08","still_hired":"true","avg_worked_seconds":"328922887","job_positions":"Senior Team Lead","is_rehired":["false","false"],"salary_change":["-7.23","11.17"]} { "index": {}} {"birth_date":"1959-12-03T00:00:00Z","emp_no":"10003","first_name":"Parto","gender":"M","hire_date":"1986-08-28T00:00:00Z","languages":"4","last_name":"Bamford","salary":"61805","height":"1.83","still_hired":"false","avg_worked_seconds":"200296405","salary_change":["14.68","12.82"]} { "index": {}} {"birth_date":"1954-05-01T00:00:00Z","emp_no":"10004","first_name":"Chirstian","gender":"M","hire_date":"1986-12-01T00:00:00Z","languages":"5","last_name":"Koblick","salary":"36174","height":"1.78","still_hired":"true","avg_worked_seconds":"311267831","job_positions":["Reporting Analyst","Tech Lead","Head Human Resources","Support Engineer"],"is_rehired":"true","salary_change":["3.65","-0.35","1.13","13.48"]} { "index": {}} {"birth_date":"1955-01-21T00:00:00Z","emp_no":"10005","first_name":"Kyoichi","gender":"M","hire_date":"1989-09-12T00:00:00Z","languages":"1","last_name":"Maliniak","salary":"63528","height":"2.05","still_hired":"true","avg_worked_seconds":"244294991","is_rehired":["false","false","false","true"],"salary_change":["-2.14","13.07"]} { "index": {}} {"birth_date":"1953-04-20T00:00:00Z","emp_no":"10006","first_name":"Anneke","gender":"F","hire_date":"1989-06-02T00:00:00Z","languages":"3","last_name":"Preusig","salary":"60335","height":"1.56","still_hired":"false","avg_worked_seconds":"372957040","job_positions":["Tech Lead","Principal Support Engineer","Senior Team Lead"],"salary_change":"-3.90"} { "index": {}} 
{"birth_date":"1957-05-23T00:00:00Z","emp_no":"10007","first_name":"Tzvetan","gender":"F","hire_date":"1989-02-10T00:00:00Z","languages":"4","last_name":"Zielinski","salary":"74572","height":"1.70","still_hired":"true","avg_worked_seconds":"393084805","is_rehired":["true","false","true","false"],"salary_change":["-7.06","1.99","0.57"]} { "index": {}} {"birth_date":"1958-02-19T00:00:00Z","emp_no":"10008","first_name":"Saniya","gender":"M","hire_date":"1994-09-15T00:00:00Z","languages":"2","last_name":"Kalloufi","salary":"43906","height":"2.10","still_hired":"true","avg_worked_seconds":"283074758","job_positions":["Senior Python Developer","Junior Developer","Purchase Manager","Internship"],"is_rehired":["true","false"],"salary_change":["12.68","3.54","0.75","-2.92"]} { "index": {}} {"birth_date":"1952-04-19T00:00:00Z","emp_no":"10009","first_name":"Sumant","gender":"F","hire_date":"1985-02-18T00:00:00Z","languages":"1","last_name":"Peac","salary":"66174","height":"1.85","still_hired":"false","avg_worked_seconds":"236805489","job_positions":["Senior Python Developer","Internship"]} { "index": {}} {"birth_date":"1963-06-01T00:00:00Z","emp_no":"10010","first_name":"Duangkaew","hire_date":"1989-08-24T00:00:00Z","languages":"4","last_name":"Piveteau","salary":"45797","height":"1.70","still_hired":"false","avg_worked_seconds":"315236372","job_positions":["Architect","Reporting Analyst","Tech Lead","Purchase Manager"],"is_rehired":["true","true","false","false"],"salary_change":["5.05","-6.77","4.69","12.15"]} { "index": {}} {"birth_date":"1953-11-07T00:00:00Z","emp_no":"10011","first_name":"Mary","hire_date":"1990-01-22T00:00:00Z","languages":"5","last_name":"Sluis","salary":"31120","height":"1.50","still_hired":"true","avg_worked_seconds":"239615525","job_positions":["Architect","Reporting Analyst","Tech Lead","Senior Team Lead"],"is_rehired":["true","true"],"salary_change":["10.35","-7.82","8.73","3.48"]} { "index": {}} {"birth_date":"1960-10-04T00:00:00Z","emp_no":"10012","first_name":"Patricio","hire_date":"1992-12-18T00:00:00Z","languages":"5","last_name":"Bridgland","salary":"48942","height":"1.97","still_hired":"false","avg_worked_seconds":"365510850","job_positions":["Head Human Resources","Accountant"],"is_rehired":["false","true","true","false"],"salary_change":"0.04"} { "index": {}} {"birth_date":"1963-06-07T00:00:00Z","emp_no":"10013","first_name":"Eberhardt","hire_date":"1985-10-20T00:00:00Z","languages":"1","last_name":"Terkki","salary":"48735","height":"1.94","still_hired":"true","avg_worked_seconds":"253864340","job_positions":"Reporting Analyst","is_rehired":["true","true"]} { "index": {}} {"birth_date":"1956-02-12T00:00:00Z","emp_no":"10014","first_name":"Berni","hire_date":"1987-03-11T00:00:00Z","languages":"5","last_name":"Genin","salary":"37137","height":"1.99","still_hired":"false","avg_worked_seconds":"225049139","job_positions":["Reporting Analyst","Data Scientist","Head Human Resources"],"salary_change":["-1.89","9.07"]} { "index": {}} {"birth_date":"1959-08-19T00:00:00Z","emp_no":"10015","first_name":"Guoxiang","hire_date":"1987-07-02T00:00:00Z","languages":"5","last_name":"Nooteboom","salary":"25324","height":"1.66","still_hired":"true","avg_worked_seconds":"390266432","job_positions":["Principal Support Engineer","Junior Developer","Head Human Resources","Support Engineer"],"is_rehired":["true","false","false","false"],"salary_change":["14.25","12.40"]} { "index": {}} 
{"birth_date":"1961-05-02T00:00:00Z","emp_no":"10016","first_name":"Kazuhito","hire_date":"1995-01-27T00:00:00Z","languages":"2","last_name":"Cappelletti","salary":"61358","height":"1.54","still_hired":"false","avg_worked_seconds":"253029411","job_positions":["Reporting Analyst","Python Developer","Accountant","Purchase Manager"],"is_rehired":["false","false"],"salary_change":["-5.18","7.69"]} { "index": {}} {"birth_date":"1958-07-06T00:00:00Z","emp_no":"10017","first_name":"Cristinel","hire_date":"1993-08-03T00:00:00Z","languages":"2","last_name":"Bouloucos","salary":"58715","height":"1.74","still_hired":"false","avg_worked_seconds":"236703986","job_positions":["Data Scientist","Head Human Resources","Purchase Manager"],"is_rehired":["true","false","true","true"],"salary_change":"-6.33"} { "index": {}} {"birth_date":"1954-06-19T00:00:00Z","emp_no":"10018","first_name":"Kazuhide","hire_date":"1987-04-03T00:00:00Z","languages":"2","last_name":"Peha","salary":"56760","height":"1.97","still_hired":"false","avg_worked_seconds":"309604079","job_positions":"Junior Developer","is_rehired":["false","false","true","true"],"salary_change":["-1.64","11.51","-5.32"]} { "index": {}} {"birth_date":"1953-01-23T00:00:00Z","emp_no":"10019","first_name":"Lillian","hire_date":"1999-04-30T00:00:00Z","languages":"1","last_name":"Haddadi","salary":"73717","height":"2.06","still_hired":"false","avg_worked_seconds":"342855721","job_positions":"Purchase Manager","is_rehired":["false","false"],"salary_change":["-6.84","8.42","-7.26"]} { "index": {}} {"birth_date":"1952-12-24T00:00:00Z","emp_no":"10020","first_name":"Mayuko","gender":"M","hire_date":"1991-01-26T00:00:00Z","last_name":"Warwick","salary":"40031","height":"1.41","still_hired":"false","avg_worked_seconds":"373309605","job_positions":"Tech Lead","is_rehired":["true","true","false"],"salary_change":"-5.81"} { "index": {}} {"birth_date":"1960-02-20T00:00:00Z","emp_no":"10021","first_name":"Ramzi","gender":"M","hire_date":"1988-02-10T00:00:00Z","last_name":"Erde","salary":"60408","height":"1.47","still_hired":"false","avg_worked_seconds":"287654610","job_positions":"Support Engineer","is_rehired":"true"} { "index": {}} {"birth_date":"1952-07-08T00:00:00Z","emp_no":"10022","first_name":"Shahaf","gender":"M","hire_date":"1995-08-22T00:00:00Z","last_name":"Famili","salary":"48233","height":"1.82","still_hired":"false","avg_worked_seconds":"233521306","job_positions":["Reporting Analyst","Data Scientist","Python Developer","Internship"],"is_rehired":["true","false"],"salary_change":["12.09","2.85"]} { "index": {}} {"birth_date":"1953-09-29T00:00:00Z","emp_no":"10023","first_name":"Bojan","gender":"F","hire_date":"1989-12-17T00:00:00Z","last_name":"Montemayor","salary":"47896","height":"1.75","still_hired":"true","avg_worked_seconds":"330870342","job_positions":["Accountant","Support Engineer","Purchase Manager"],"is_rehired":["true","true","false"],"salary_change":["14.63","0.80"]} { "index": {}} {"birth_date":"1958-09-05T00:00:00Z","emp_no":"10024","first_name":"Suzette","gender":"F","hire_date":"1997-05-19T00:00:00Z","last_name":"Pettey","salary":"64675","height":"2.08","still_hired":"true","avg_worked_seconds":"367717671","job_positions":"Junior Developer","is_rehired":["true","true","true","true"]} { "index": {}} 
{"birth_date":"1958-10-31T00:00:00Z","emp_no":"10025","first_name":"Prasadram","gender":"M","hire_date":"1987-08-17T00:00:00Z","last_name":"Heyers","salary":"47411","height":"1.87","still_hired":"false","avg_worked_seconds":"371270797","job_positions":"Accountant","is_rehired":["true","false"],"salary_change":["-4.33","-2.90","12.06","-3.46"]} { "index": {}} {"birth_date":"1953-04-03T00:00:00Z","emp_no":"10026","first_name":"Yongqiao","gender":"M","hire_date":"1995-03-20T00:00:00Z","last_name":"Berztiss","salary":"28336","height":"2.10","still_hired":"true","avg_worked_seconds":"359208133","job_positions":"Reporting Analyst","is_rehired":["false","true"],"salary_change":["-7.37","10.62","11.20"]} { "index": {}} {"birth_date":"1962-07-10T00:00:00Z","emp_no":"10027","first_name":"Divier","gender":"F","hire_date":"1989-07-07T00:00:00Z","last_name":"Reistad","salary":"73851","height":"1.53","still_hired":"false","avg_worked_seconds":"374037782","job_positions":"Senior Python Developer","is_rehired":"false"} { "index": {}} {"birth_date":"1963-11-26T00:00:00Z","emp_no":"10028","first_name":"Domenick","gender":"M","hire_date":"1991-10-22T00:00:00Z","last_name":"Tempesti","salary":"39356","height":"2.07","still_hired":"true","avg_worked_seconds":"226435054","job_positions":["Tech Lead","Python Developer","Accountant","Internship"],"is_rehired":["true","false","false","true"]} { "index": {}} {"birth_date":"1956-12-13T00:00:00Z","emp_no":"10029","first_name":"Otmar","gender":"M","hire_date":"1985-11-20T00:00:00Z","last_name":"Herbst","salary":"74999","height":"1.99","still_hired":"false","avg_worked_seconds":"257694181","job_positions":["Senior Python Developer","Data Scientist","Principal Support Engineer"],"is_rehired":"true","salary_change":["-0.32","-1.90","-8.19"]} { "index": {}} {"birth_date":"1958-07-14T00:00:00Z","emp_no":"10030","gender":"M","hire_date":"1994-02-17T00:00:00Z","languages":"3","last_name":"Demeyer","salary":"67492","height":"1.92","still_hired":"false","avg_worked_seconds":"394597613","job_positions":["Tech Lead","Data Scientist","Senior Team Lead"],"is_rehired":["true","false","false"],"salary_change":"-0.40"} { "index": {}} {"birth_date":"1959-01-27T00:00:00Z","emp_no":"10031","gender":"M","hire_date":"1991-09-01T00:00:00Z","languages":"4","last_name":"Joslin","salary":"37716","height":"1.68","still_hired":"false","avg_worked_seconds":"348545109","job_positions":["Architect","Senior Python Developer","Purchase Manager","Senior Team Lead"],"is_rehired":"false"} { "index": {}} {"birth_date":"1960-08-09T00:00:00Z","emp_no":"10032","gender":"F","hire_date":"1990-06-20T00:00:00Z","languages":"3","last_name":"Reistad","salary":"62233","height":"2.10","still_hired":"false","avg_worked_seconds":"277622619","job_positions":["Architect","Senior Python Developer","Junior Developer","Purchase Manager"],"is_rehired":["false","false"],"salary_change":["9.32","-4.92"]} { "index": {}} {"birth_date":"1956-11-14T00:00:00Z","emp_no":"10033","gender":"M","hire_date":"1987-03-18T00:00:00Z","languages":"1","last_name":"Merlo","salary":"70011","height":"1.63","still_hired":"false","avg_worked_seconds":"208374744","is_rehired":"true"} { "index": {}} {"birth_date":"1962-12-29T00:00:00Z","emp_no":"10034","gender":"M","hire_date":"1988-09-21T00:00:00Z","languages":"1","last_name":"Swan","salary":"39878","height":"1.46","still_hired":"false","avg_worked_seconds":"214393176","job_positions":["Business Analyst","Data Scientist","Python 
Developer","Accountant"],"is_rehired":"false","salary_change":"-8.46"} { "index": {}} {"birth_date":"1953-02-08T00:00:00Z","emp_no":"10035","gender":"M","hire_date":"1988-09-05T00:00:00Z","languages":"5","last_name":"Chappelet","salary":"25945","height":"1.81","still_hired":"false","avg_worked_seconds":"203838153","job_positions":["Senior Python Developer","Data Scientist"],"is_rehired":"false","salary_change":["-2.54","-6.58"]} { "index": {}} {"birth_date":"1959-08-10T00:00:00Z","emp_no":"10036","gender":"M","hire_date":"1992-01-03T00:00:00Z","languages":"4","last_name":"Portugali","salary":"60781","height":"1.61","still_hired":"false","avg_worked_seconds":"305493131","job_positions":"Senior Python Developer","is_rehired":["true","false","false"]} { "index": {}} {"birth_date":"1963-07-22T00:00:00Z","emp_no":"10037","gender":"M","hire_date":"1990-12-05T00:00:00Z","languages":"2","last_name":"Makrucki","salary":"37691","height":"2.00","still_hired":"true","avg_worked_seconds":"359217000","job_positions":["Senior Python Developer","Tech Lead","Accountant"],"is_rehired":"false","salary_change":"-7.08"} { "index": {}} {"birth_date":"1960-07-20T00:00:00Z","emp_no":"10038","gender":"M","hire_date":"1989-09-20T00:00:00Z","languages":"4","last_name":"Lortz","salary":"35222","height":"1.53","still_hired":"true","avg_worked_seconds":"314036411","job_positions":["Senior Python Developer","Python Developer","Support Engineer"]} { "index": {}} {"birth_date":"1959-10-01T00:00:00Z","emp_no":"10039","gender":"M","hire_date":"1988-01-19T00:00:00Z","languages":"2","last_name":"Brender","salary":"36051","height":"1.55","still_hired":"false","avg_worked_seconds":"243221262","job_positions":["Business Analyst","Python Developer","Principal Support Engineer"],"is_rehired":["true","true"],"salary_change":"-6.90"} { "index": {}} {"emp_no":"10040","first_name":"Weiyi","gender":"F","hire_date":"1993-02-14T00:00:00Z","languages":"4","last_name":"Meriste","salary":"37112","height":"1.90","still_hired":"false","avg_worked_seconds":"244478622","job_positions":"Principal Support Engineer","is_rehired":["true","false","true","true"],"salary_change":["6.97","14.74","-8.94","1.92"]} { "index": {}} {"emp_no":"10041","first_name":"Uri","gender":"F","hire_date":"1989-11-12T00:00:00Z","languages":"1","last_name":"Lenart","salary":"56415","height":"1.75","still_hired":"false","avg_worked_seconds":"287789442","job_positions":["Data Scientist","Head Human Resources","Internship","Senior Team Lead"],"salary_change":["9.21","0.05","7.29","-2.94"]} { "index": {}} {"emp_no":"10042","first_name":"Magy","gender":"F","hire_date":"1993-03-21T00:00:00Z","languages":"3","last_name":"Stamatiou","salary":"30404","height":"1.44","still_hired":"true","avg_worked_seconds":"246355863","job_positions":["Architect","Business Analyst","Junior Developer","Internship"],"salary_change":["-9.28","9.42"]} { "index": {}} {"emp_no":"10043","first_name":"Yishay","gender":"M","hire_date":"1990-10-20T00:00:00Z","languages":"1","last_name":"Tzvieli","salary":"34341","height":"1.52","still_hired":"true","avg_worked_seconds":"287222180","job_positions":["Data Scientist","Python Developer","Support Engineer"],"is_rehired":["false","true","true"],"salary_change":["-5.17","4.62","7.42"]} { "index": {}} {"emp_no":"10044","first_name":"Mingsen","gender":"F","hire_date":"1994-05-21T00:00:00Z","languages":"1","last_name":"Casley","salary":"39728","height":"2.06","still_hired":"false","avg_worked_seconds":"387408356","job_positions":["Tech Lead","Principal Support 
Engineer","Accountant","Support Engineer"],"is_rehired":["true","true"],"salary_change":"8.09"} { "index": {}} {"emp_no":"10045","first_name":"Moss","gender":"M","hire_date":"1989-09-02T00:00:00Z","languages":"3","last_name":"Shanbhogue","salary":"74970","height":"1.70","still_hired":"false","avg_worked_seconds":"371418933","job_positions":["Principal Support Engineer","Junior Developer","Accountant","Purchase Manager"],"is_rehired":["true","false"]} { "index": {}} {"emp_no":"10046","first_name":"Lucien","gender":"M","hire_date":"1992-06-20T00:00:00Z","languages":"4","last_name":"Rosenbaum","salary":"50064","height":"1.52","still_hired":"true","avg_worked_seconds":"302353405","job_positions":["Principal Support Engineer","Junior Developer","Head Human Resources","Internship"],"is_rehired":["true","true","false","true"],"salary_change":"2.39"} { "index": {}} {"emp_no":"10047","first_name":"Zvonko","gender":"M","hire_date":"1989-03-31T00:00:00Z","languages":"4","last_name":"Nyanchama","salary":"42716","height":"1.52","still_hired":"true","avg_worked_seconds":"306369346","job_positions":["Architect","Data Scientist","Principal Support Engineer","Senior Team Lead"],"is_rehired":"true","salary_change":["-6.36","12.12"]} { "index": {}} {"emp_no":"10048","first_name":"Florian","gender":"M","hire_date":"1985-02-24T00:00:00Z","languages":"3","last_name":"Syrotiuk","salary":"26436","height":"2.00","still_hired":"false","avg_worked_seconds":"248451647","job_positions":"Internship","is_rehired":["true","true"]} { "index": {}} {"emp_no":"10049","first_name":"Basil","gender":"F","hire_date":"1992-05-04T00:00:00Z","languages":"5","last_name":"Tramer","salary":"37853","height":"1.52","still_hired":"true","avg_worked_seconds":"320725709","job_positions":["Senior Python Developer","Business Analyst"],"salary_change":"-1.05"} { "index": {}} {"birth_date":"1958-05-21T00:00:00Z","emp_no":"10050","first_name":"Yinghua","gender":"M","hire_date":"1990-12-25T00:00:00Z","languages":"2","last_name":"Dredge","salary":"43026","height":"1.96","still_hired":"true","avg_worked_seconds":"242731798","job_positions":["Reporting Analyst","Junior Developer","Accountant","Support Engineer"],"is_rehired":"true","salary_change":["8.70","10.94"]} { "index": {}} {"birth_date":"1953-07-28T00:00:00Z","emp_no":"10051","first_name":"Hidefumi","gender":"M","hire_date":"1992-10-15T00:00:00Z","languages":"3","last_name":"Caine","salary":"58121","height":"1.89","still_hired":"true","avg_worked_seconds":"374753122","job_positions":["Business Analyst","Accountant","Purchase Manager"]} { "index": {}} {"birth_date":"1961-02-26T00:00:00Z","emp_no":"10052","first_name":"Heping","gender":"M","hire_date":"1988-05-21T00:00:00Z","languages":"1","last_name":"Nitsch","salary":"55360","height":"1.79","still_hired":"true","avg_worked_seconds":"299654717","is_rehired":["true","true","false"],"salary_change":["-0.55","-1.89","-4.22","-6.03"]} { "index": {}} {"birth_date":"1954-09-13T00:00:00Z","emp_no":"10053","first_name":"Sanjiv","gender":"F","hire_date":"1986-02-04T00:00:00Z","languages":"3","last_name":"Zschoche","salary":"54462","height":"1.58","still_hired":"false","avg_worked_seconds":"368103911","job_positions":"Support Engineer","is_rehired":["true","false","true","false"],"salary_change":["-7.67","-3.25"]} { "index": {}} 
{"birth_date":"1957-04-04T00:00:00Z","emp_no":"10054","first_name":"Mayumi","gender":"M","hire_date":"1995-03-13T00:00:00Z","languages":"4","last_name":"Schueller","salary":"65367","height":"1.82","still_hired":"false","avg_worked_seconds":"297441693","job_positions":"Principal Support Engineer","is_rehired":["false","false"]} { "index": {}} {"birth_date":"1956-06-06T00:00:00Z","emp_no":"10055","first_name":"Georgy","gender":"M","hire_date":"1992-04-27T00:00:00Z","languages":"5","last_name":"Dredge","salary":"49281","height":"2.04","still_hired":"false","avg_worked_seconds":"283157844","job_positions":["Senior Python Developer","Head Human Resources","Internship","Support Engineer"],"is_rehired":["false","false","true"],"salary_change":["7.34","12.99","3.17"]} { "index": {}} {"birth_date":"1961-09-01T00:00:00Z","emp_no":"10056","first_name":"Brendon","gender":"F","hire_date":"1990-02-01T00:00:00Z","languages":"2","last_name":"Bernini","salary":"33370","height":"1.57","still_hired":"true","avg_worked_seconds":"349086555","job_positions":"Senior Team Lead","is_rehired":["true","false","false"],"salary_change":["10.99","-5.17"]} { "index": {}} {"birth_date":"1954-05-30T00:00:00Z","emp_no":"10057","first_name":"Ebbe","gender":"F","hire_date":"1992-01-15T00:00:00Z","languages":"4","last_name":"Callaway","salary":"27215","height":"1.59","still_hired":"true","avg_worked_seconds":"324356269","job_positions":["Python Developer","Head Human Resources"],"salary_change":["-6.73","-2.43","-5.27","1.03"]} { "index": {}} {"birth_date":"1954-10-01T00:00:00Z","emp_no":"10058","first_name":"Berhard","gender":"M","hire_date":"1987-04-13T00:00:00Z","languages":"3","last_name":"McFarlin","salary":"38376","height":"1.83","still_hired":"false","avg_worked_seconds":"268378108","job_positions":"Principal Support Engineer","salary_change":"-4.89"} { "index": {}} {"birth_date":"1953-09-19T00:00:00Z","emp_no":"10059","first_name":"Alejandro","gender":"F","hire_date":"1991-06-26T00:00:00Z","languages":"2","last_name":"McAlpine","salary":"44307","height":"1.48","still_hired":"false","avg_worked_seconds":"237368465","job_positions":["Architect","Principal Support Engineer","Purchase Manager","Senior Team Lead"],"is_rehired":"false","salary_change":["5.53","13.38","-4.69","6.27"]} { "index": {}} {"birth_date":"1961-10-15T00:00:00Z","emp_no":"10060","first_name":"Breannda","gender":"M","hire_date":"1987-11-02T00:00:00Z","languages":"2","last_name":"Billingsley","salary":"29175","height":"1.42","still_hired":"true","avg_worked_seconds":"341158890","job_positions":["Business Analyst","Data Scientist","Senior Team Lead"],"is_rehired":["false","false","true","false"],"salary_change":["-1.76","-0.85"]} { "index": {}} {"birth_date":"1962-10-19T00:00:00Z","emp_no":"10061","first_name":"Tse","gender":"M","hire_date":"1985-09-17T00:00:00Z","languages":"1","last_name":"Herber","salary":"49095","height":"1.45","still_hired":"false","avg_worked_seconds":"327550310","job_positions":["Purchase Manager","Senior Team Lead"],"is_rehired":["false","true"],"salary_change":["14.39","-2.58","-0.95"]} { "index": {}} {"birth_date":"1961-11-02T00:00:00Z","emp_no":"10062","first_name":"Anoosh","gender":"M","hire_date":"1991-08-30T00:00:00Z","languages":"3","last_name":"Peyn","salary":"65030","height":"1.70","still_hired":"false","avg_worked_seconds":"203989706","job_positions":["Python Developer","Senior Team Lead"],"is_rehired":["false","true","true"],"salary_change":"-1.17"} { "index": {}} 
{"birth_date":"1952-08-06T00:00:00Z","emp_no":"10063","first_name":"Gino","gender":"F","hire_date":"1989-04-08T00:00:00Z","languages":"3","last_name":"Leonhardt","salary":"52121","height":"1.78","still_hired":"true","avg_worked_seconds":"214068302","is_rehired":"true"} { "index": {}} {"birth_date":"1959-04-07T00:00:00Z","emp_no":"10064","first_name":"Udi","gender":"M","hire_date":"1985-11-20T00:00:00Z","languages":"5","last_name":"Jansch","salary":"33956","height":"1.93","still_hired":"false","avg_worked_seconds":"307364077","job_positions":"Purchase Manager","is_rehired":["false","false","true","false"],"salary_change":["-8.66","-2.52"]} { "index": {}} {"birth_date":"1963-04-14T00:00:00Z","emp_no":"10065","first_name":"Satosi","gender":"M","hire_date":"1988-05-18T00:00:00Z","languages":"2","last_name":"Awdeh","salary":"50249","height":"1.59","still_hired":"false","avg_worked_seconds":"372660279","job_positions":["Business Analyst","Data Scientist","Principal Support Engineer"],"is_rehired":["false","true"],"salary_change":["-1.47","14.44","-9.81"]} { "index": {}} {"birth_date":"1952-11-13T00:00:00Z","emp_no":"10066","first_name":"Kwee","gender":"M","hire_date":"1986-02-26T00:00:00Z","languages":"5","last_name":"Schusler","salary":"31897","height":"2.10","still_hired":"true","avg_worked_seconds":"360906451","job_positions":["Senior Python Developer","Data Scientist","Accountant","Internship"],"is_rehired":["true","true","true"],"salary_change":"5.94"} { "index": {}} {"birth_date":"1953-01-07T00:00:00Z","emp_no":"10067","first_name":"Claudi","gender":"M","hire_date":"1987-03-04T00:00:00Z","languages":"2","last_name":"Stavenow","salary":"52044","height":"1.77","still_hired":"true","avg_worked_seconds":"347664141","job_positions":["Tech Lead","Principal Support Engineer"],"is_rehired":["false","false"],"salary_change":["8.72","4.44"]} { "index": {}} {"birth_date":"1962-11-26T00:00:00Z","emp_no":"10068","first_name":"Charlene","gender":"M","hire_date":"1987-08-07T00:00:00Z","languages":"3","last_name":"Brattka","salary":"28941","height":"1.58","still_hired":"true","avg_worked_seconds":"233999584","job_positions":"Architect","is_rehired":"true","salary_change":["3.43","-5.61","-5.29"]} { "index": {}} {"birth_date":"1960-09-06T00:00:00Z","emp_no":"10069","first_name":"Margareta","gender":"F","hire_date":"1989-11-05T00:00:00Z","languages":"5","last_name":"Bierman","salary":"41933","height":"1.77","still_hired":"true","avg_worked_seconds":"366512352","job_positions":["Business Analyst","Junior Developer","Purchase Manager","Support Engineer"],"is_rehired":"false","salary_change":["-3.34","-6.33","6.23","-0.31"]} { "index": {}} {"birth_date":"1955-08-20T00:00:00Z","emp_no":"10070","first_name":"Reuven","gender":"M","hire_date":"1985-10-14T00:00:00Z","languages":"3","last_name":"Garigliano","salary":"54329","height":"1.77","still_hired":"true","avg_worked_seconds":"347188604","is_rehired":["true","true","true"],"salary_change":"-5.90"} { "index": {}} {"birth_date":"1958-01-21T00:00:00Z","emp_no":"10071","first_name":"Hisao","gender":"M","hire_date":"1987-10-01T00:00:00Z","languages":"2","last_name":"Lipner","salary":"40612","height":"2.07","still_hired":"false","avg_worked_seconds":"306671693","job_positions":["Business Analyst","Reporting Analyst","Senior Team Lead"],"is_rehired":["false","false","false"],"salary_change":"-2.69"} { "index": {}} 
{"birth_date":"1952-05-15T00:00:00Z","emp_no":"10072","first_name":"Hironoby","gender":"F","hire_date":"1988-07-21T00:00:00Z","languages":"5","last_name":"Sidou","salary":"54518","height":"1.82","still_hired":"true","avg_worked_seconds":"209506065","job_positions":["Architect","Tech Lead","Python Developer","Senior Team Lead"],"is_rehired":["false","false","true","false"],"salary_change":["11.21","-2.30","2.22","-5.44"]} { "index": {}} {"birth_date":"1954-02-23T00:00:00Z","emp_no":"10073","first_name":"Shir","gender":"M","hire_date":"1991-12-01T00:00:00Z","languages":"4","last_name":"McClurg","salary":"32568","height":"1.66","still_hired":"false","avg_worked_seconds":"314930367","job_positions":["Principal Support Engineer","Python Developer","Junior Developer","Purchase Manager"],"is_rehired":["true","false"],"salary_change":"-5.67"} { "index": {}} {"birth_date":"1955-08-28T00:00:00Z","emp_no":"10074","first_name":"Mokhtar","gender":"F","hire_date":"1990-08-13T00:00:00Z","languages":"5","last_name":"Bernatsky","salary":"38992","height":"1.64","still_hired":"true","avg_worked_seconds":"382397583","job_positions":["Senior Python Developer","Python Developer"],"is_rehired":["true","false","false","true"],"salary_change":["6.70","1.98","-5.64","2.96"]} { "index": {}} {"birth_date":"1960-03-09T00:00:00Z","emp_no":"10075","first_name":"Gao","gender":"F","hire_date":"1987-03-19T00:00:00Z","languages":"5","last_name":"Dolinsky","salary":"51956","height":"1.94","still_hired":"false","avg_worked_seconds":"370238919","job_positions":"Purchase Manager","is_rehired":"true","salary_change":["9.63","-3.29","8.42"]} { "index": {}} {"birth_date":"1952-06-13T00:00:00Z","emp_no":"10076","first_name":"Erez","gender":"F","hire_date":"1985-07-09T00:00:00Z","languages":"3","last_name":"Ritzmann","salary":"62405","height":"1.83","still_hired":"false","avg_worked_seconds":"376240317","job_positions":["Architect","Senior Python Developer"],"is_rehired":"false","salary_change":["-6.90","-1.30","8.75"]} { "index": {}} {"birth_date":"1964-04-18T00:00:00Z","emp_no":"10077","first_name":"Mona","gender":"M","hire_date":"1990-03-02T00:00:00Z","languages":"5","last_name":"Azuma","salary":"46595","height":"1.68","still_hired":"false","avg_worked_seconds":"351960222","job_positions":"Internship","salary_change":"-0.01"} { "index": {}} {"birth_date":"1959-12-25T00:00:00Z","emp_no":"10078","first_name":"Danel","gender":"F","hire_date":"1987-05-26T00:00:00Z","languages":"2","last_name":"Mondadori","salary":"69904","height":"1.81","still_hired":"true","avg_worked_seconds":"377116038","job_positions":["Architect","Principal Support Engineer","Internship"],"is_rehired":"true","salary_change":["-7.88","9.98","12.52"]} { "index": {}} {"birth_date":"1961-10-05T00:00:00Z","emp_no":"10079","first_name":"Kshitij","gender":"F","hire_date":"1986-03-27T00:00:00Z","languages":"2","last_name":"Gils","salary":"32263","height":"1.59","still_hired":"false","avg_worked_seconds":"320953330","is_rehired":"false","salary_change":"7.58"} { "index": {}} {"birth_date":"1957-12-03T00:00:00Z","emp_no":"10080","first_name":"Premal","gender":"M","hire_date":"1985-11-19T00:00:00Z","languages":"5","last_name":"Baek","salary":"52833","height":"1.80","still_hired":"false","avg_worked_seconds":"239266137","job_positions":"Senior Python Developer","salary_change":["-4.35","7.36","5.56"]} { "index": {}} 
{"birth_date":"1960-12-17T00:00:00Z","emp_no":"10081","first_name":"Zhongwei","gender":"M","hire_date":"1986-10-30T00:00:00Z","languages":"2","last_name":"Rosen","salary":"50128","height":"1.44","still_hired":"true","avg_worked_seconds":"321375511","job_positions":["Accountant","Internship"],"is_rehired":["false","false","false"]} { "index": {}} {"birth_date":"1963-09-09T00:00:00Z","emp_no":"10082","first_name":"Parviz","gender":"M","hire_date":"1990-01-03T00:00:00Z","languages":"4","last_name":"Lortz","salary":"49818","height":"1.61","still_hired":"false","avg_worked_seconds":"232522994","job_positions":"Principal Support Engineer","is_rehired":"false","salary_change":["1.19","-3.39"]} { "index": {}} {"birth_date":"1959-07-23T00:00:00Z","emp_no":"10083","first_name":"Vishv","gender":"M","hire_date":"1987-03-31T00:00:00Z","languages":"1","last_name":"Zockler","salary":"39110","height":"1.42","still_hired":"false","avg_worked_seconds":"331236443","job_positions":"Head Human Resources"} { "index": {}} {"birth_date":"1960-05-25T00:00:00Z","emp_no":"10084","first_name":"Tuval","gender":"M","hire_date":"1995-12-15T00:00:00Z","languages":"1","last_name":"Kalloufi","salary":"28035","height":"1.51","still_hired":"true","avg_worked_seconds":"359067056","job_positions":"Principal Support Engineer","is_rehired":"false"} { "index": {}} {"birth_date":"1962-11-07T00:00:00Z","emp_no":"10085","first_name":"Kenroku","gender":"M","hire_date":"1994-04-09T00:00:00Z","languages":"5","last_name":"Malabarba","salary":"35742","height":"2.01","still_hired":"true","avg_worked_seconds":"353404008","job_positions":["Senior Python Developer","Business Analyst","Tech Lead","Accountant"],"salary_change":["11.67","6.75","8.40"]} { "index": {}} {"birth_date":"1962-11-19T00:00:00Z","emp_no":"10086","first_name":"Somnath","gender":"M","hire_date":"1990-02-16T00:00:00Z","languages":"1","last_name":"Foote","salary":"68547","height":"1.74","still_hired":"true","avg_worked_seconds":"328580163","job_positions":"Senior Python Developer","is_rehired":["false","true"],"salary_change":"13.61"} { "index": {}} {"birth_date":"1959-07-23T00:00:00Z","emp_no":"10087","first_name":"Xinglin","gender":"F","hire_date":"1986-09-08T00:00:00Z","languages":"5","last_name":"Eugenio","salary":"32272","height":"1.74","still_hired":"true","avg_worked_seconds":"305782871","job_positions":["Junior Developer","Internship"],"is_rehired":["false","false"],"salary_change":"-2.05"} { "index": {}} {"birth_date":"1954-02-25T00:00:00Z","emp_no":"10088","first_name":"Jungsoon","gender":"F","hire_date":"1988-09-02T00:00:00Z","languages":"5","last_name":"Syrzycki","salary":"39638","height":"1.91","still_hired":"false","avg_worked_seconds":"330714423","job_positions":["Reporting Analyst","Business Analyst","Tech Lead"],"is_rehired":"true"} { "index": {}} {"birth_date":"1963-03-21T00:00:00Z","emp_no":"10089","first_name":"Sudharsan","gender":"F","hire_date":"1986-08-12T00:00:00Z","languages":"4","last_name":"Flasterstein","salary":"43602","height":"1.57","still_hired":"true","avg_worked_seconds":"232951673","job_positions":["Junior Developer","Accountant"],"is_rehired":["true","false","false","false"]} { "index": {}} {"birth_date":"1961-05-30T00:00:00Z","emp_no":"10090","first_name":"Kendra","gender":"M","hire_date":"1986-03-14T00:00:00Z","languages":"2","last_name":"Hofting","salary":"44956","height":"2.03","still_hired":"true","avg_worked_seconds":"212460105","is_rehired":["false","false","false","true"],"salary_change":["7.15","-1.85","3.60"]} { "index": {}} 
{"birth_date":"1955-10-04T00:00:00Z","emp_no":"10091","first_name":"Amabile","gender":"M","hire_date":"1992-11-18T00:00:00Z","languages":"3","last_name":"Gomatam","salary":"38645","height":"2.09","still_hired":"true","avg_worked_seconds":"242582807","job_positions":["Reporting Analyst","Python Developer"],"is_rehired":["true","true","false","false"],"salary_change":["-9.23","7.50","5.85","5.19"]} { "index": {}} {"birth_date":"1964-10-18T00:00:00Z","emp_no":"10092","first_name":"Valdiodio","gender":"F","hire_date":"1989-09-22T00:00:00Z","languages":"1","last_name":"Niizuma","salary":"25976","height":"1.75","still_hired":"false","avg_worked_seconds":"313407352","job_positions":["Junior Developer","Accountant"],"is_rehired":["false","false","true","true"],"salary_change":["8.78","0.39","-6.77","8.30"]} { "index": {}} {"birth_date":"1964-06-11T00:00:00Z","emp_no":"10093","first_name":"Sailaja","gender":"M","hire_date":"1996-11-05T00:00:00Z","languages":"3","last_name":"Desikan","salary":"45656","height":"1.69","still_hired":"false","avg_worked_seconds":"315904921","job_positions":["Reporting Analyst","Tech Lead","Principal Support Engineer","Purchase Manager"],"salary_change":"-0.88"} { "index": {}} {"birth_date":"1957-05-25T00:00:00Z","emp_no":"10094","first_name":"Arumugam","gender":"F","hire_date":"1987-04-18T00:00:00Z","languages":"5","last_name":"Ossenbruggen","salary":"66817","height":"2.10","still_hired":"false","avg_worked_seconds":"332920135","job_positions":["Senior Python Developer","Principal Support Engineer","Accountant"],"is_rehired":["true","false","true"],"salary_change":["2.22","7.92"]} { "index": {}} {"birth_date":"1965-01-03T00:00:00Z","emp_no":"10095","first_name":"Hilari","gender":"M","hire_date":"1986-07-15T00:00:00Z","languages":"4","last_name":"Morton","salary":"37702","height":"1.55","still_hired":"false","avg_worked_seconds":"321850475","is_rehired":["true","true","false","false"],"salary_change":["-3.93","-6.66"]} { "index": {}} {"birth_date":"1954-09-16T00:00:00Z","emp_no":"10096","first_name":"Jayson","gender":"M","hire_date":"1990-01-14T00:00:00Z","languages":"4","last_name":"Mandell","salary":"43889","height":"1.94","still_hired":"false","avg_worked_seconds":"204381503","job_positions":["Architect","Reporting Analyst"],"is_rehired":["false","false","false"]} { "index": {}} {"birth_date":"1952-02-27T00:00:00Z","emp_no":"10097","first_name":"Remzi","gender":"M","hire_date":"1990-09-15T00:00:00Z","languages":"3","last_name":"Waschkowski","salary":"71165","height":"1.53","still_hired":"false","avg_worked_seconds":"206258084","job_positions":["Reporting Analyst","Tech Lead"],"is_rehired":["true","false"],"salary_change":"-1.12"} { "index": {}} {"birth_date":"1961-09-23T00:00:00Z","emp_no":"10098","first_name":"Sreekrishna","gender":"F","hire_date":"1985-05-13T00:00:00Z","languages":"4","last_name":"Servieres","salary":"44817","height":"2.00","still_hired":"false","avg_worked_seconds":"272392146","job_positions":["Architect","Internship","Senior Team Lead"],"is_rehired":"false","salary_change":["-2.83","8.31","4.38"]} { "index": {}} {"birth_date":"1956-05-25T00:00:00Z","emp_no":"10099","first_name":"Valter","gender":"F","hire_date":"1988-10-18T00:00:00Z","languages":"2","last_name":"Sullins","salary":"73578","height":"1.81","still_hired":"true","avg_worked_seconds":"377713748","is_rehired":["true","true"],"salary_change":["10.71","14.26","-8.78","-3.98"]} { "index": {}} 
{"birth_date":"1953-04-21T00:00:00Z","emp_no":"10100","first_name":"Hironobu","gender":"F","hire_date":"1987-09-21T00:00:00Z","languages":"4","last_name":"Haraldson","salary":"68431","height":"1.77","still_hired":"true","avg_worked_seconds":"223910853","job_positions":"Purchase Manager","is_rehired":["false","true","true","false"],"salary_change":["13.97","-7.49"]} ``` :::: ## Convert the dataset [convert-dataset-pandas-dataframe] Use the ES|QL CSV import to convert the `employees` dataset to a Pandas dataframe object. ```python from io import StringIO from elasticsearch import Elasticsearch import pandas as pd client = Elasticsearch( "https://[host].elastic-cloud.com", api_key="...", ) response = client.esql.query( query="FROM employees | LIMIT 500", format="csv", ) df = pd.read_csv(StringIO(response.body)) print(df) ``` Even though the dataset contains only 100 records, a LIMIT of 500 is specified to suppress ES|QL warnings about potentially missing records. This prints the following dataframe: ```python avg_worked_seconds ... salary_change.long still_hired 0 268728049 ... 1 True 1 328922887 ... [-7, 11] True 2 200296405 ... [12, 14] False 3 311267831 ... [0, 1, 3, 13] True 4 244294991 ... [-2, 13] True .. ... ... ... ... 95 204381503 ... NaN False 96 206258084 ... -1 False 97 272392146 ... [-2, 4, 8] False 98 377713748 ... [-8, -3, 10, 14] True 99 223910853 ... [-7, 13] True ``` You can now analyze the data with Pandas or you can also continue transforming the data using ES|QL. ## Analyze the data with Pandas [analyze-data] In the next example, the [STATS … BY](elasticsearch://reference/query-languages/esql/commands/processing-commands.md#esql-stats-by) command is utilized to count how many employees are speaking a given language. The results are sorted with the `languages` column using [SORT](elasticsearch://reference/query-languages/esql/commands/processing-commands.md#esql-sort): ```python response = client.esql.query( query=""" FROM employees | STATS count = COUNT(emp_no) BY languages | SORT languages | LIMIT 500 """, format="csv", ) df = pd.read_csv( StringIO(response.body), dtype={"count": "Int64", "languages": "Int64"}, ) print(df) ``` Note that the `dtype` parameter of `pd.read_csv()` is useful when the type inferred by Pandas is not enough. The code prints the following response: ```python count languages 0 15 1 1 19 2 2 17 3 3 18 4 4 21 5 ``` ## Pass parameters to a query with ES|QL [passing-params] Use the [built-in parameters support of the ES|QL REST API](docs-content://explore-analyze/query-filter/languages/esql-rest.md#esql-rest-params) to pass parameters to a query: ```python response = client.esql.query( query=""" FROM employees | STATS count = COUNT(emp_no) BY languages | WHERE languages >= (?) | SORT languages | LIMIT 500 """, format="csv", params=[3], ) df = pd.read_csv( StringIO(response.body), dtype={"count": "Int64", "languages": "Int64"}, ) print(df) ``` The code above outputs the following: ```python count languages 0 17 3 1 18 4 2 21 5 ``` If you want to learn more about ES|QL, refer to the [ES|QL documentation](docs-content://explore-analyze/query-filter/languages/esql.md). You can also check out this other [Python example using Boston Celtics data](https://github.com/elastic/elasticsearch-labs/blob/main/supporting-blog-content/Boston-Celtics-Demo/celtics-esql-demo.ipynb). 
python-elasticsearch-9.1.1/docs/reference/esql-query-builder.md000066400000000000000000000214271506101734100246220ustar00rootroot00000000000000
# ES|QL Query Builder

::::{warning}
This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.
::::

The ES|QL Query Builder allows you to construct ES|QL queries using Python syntax. Consider the following example:

```python
>>> from elasticsearch.esql import ESQL
>>> query = (
    ESQL.from_("employees")
    .sort("emp_no")
    .keep("first_name", "last_name", "height")
    .eval(height_feet="height * 3.281", height_cm="height * 100")
    .limit(3)
)
```

You can then see the assembled ES|QL query by printing the resulting query object:

```python
>>> print(query)
FROM employees
| SORT emp_no
| KEEP first_name, last_name, height
| EVAL height_feet = height * 3.281, height_cm = height * 100
| LIMIT 3
```

To execute this query, you can pass it to the `client.esql.query()` endpoint:

```python
>>> import os
>>> from elasticsearch import Elasticsearch
>>> client = Elasticsearch(hosts=[os.environ['ELASTICSEARCH_URL']])
>>> response = client.esql.query(query=query)
```

The response body contains a `columns` attribute with the list of columns included in the results, and a `values` attribute with the list of results for the query, each given as a list of column values. Here is a possible response body returned by the example query given above:

```python
>>> from pprint import pprint
>>> pprint(response.body)
{'columns': [{'name': 'first_name', 'type': 'text'},
             {'name': 'last_name', 'type': 'text'},
             {'name': 'height', 'type': 'double'},
             {'name': 'height_feet', 'type': 'double'},
             {'name': 'height_cm', 'type': 'double'}],
 'is_partial': False,
 'took': 11,
 'values': [['Adrian', 'Wells', 2.424, 7.953144, 242.4],
            ['Aaron', 'Gonzalez', 1.584, 5.1971, 158.4],
            ['Miranda', 'Kramer', 1.55, 5.08555, 155]]}
```

## Creating an ES|QL query

To construct an ES|QL query, you start from one of the ES|QL source commands:

### `ESQL.from_`

The `FROM` command selects the indices, data streams or aliases to be queried. Examples:

```python
from elasticsearch.esql import ESQL

# FROM employees
query1 = ESQL.from_("employees")

# FROM <logs-{now/d}>
query2 = ESQL.from_("<logs-{now/d}>")

# FROM employees-00001, other-employees-*
query3 = ESQL.from_("employees-00001", "other-employees-*")

# FROM cluster_one:employees-00001, cluster_two:other-employees-*
query4 = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*")

# FROM employees METADATA _id
query5 = ESQL.from_("employees").metadata("_id")
```

Note how in the last example the optional `METADATA` clause of the `FROM` command is added as a chained method.

### `ESQL.row`

The `ROW` command produces a row with one or more columns, with the values that you specify. Examples:

```python
from elasticsearch.esql import ESQL, functions

# ROW a = 1, b = "two", c = null
query1 = ESQL.row(a=1, b="two", c=None)

# ROW a = [1, 2]
query2 = ESQL.row(a=[1, 2])

# ROW a = ROUND(1.23, 0)
query3 = ESQL.row(a=functions.round(1.23, 0))
```

### `ESQL.show`

The `SHOW` command returns information about the deployment and its capabilities. Example:

```python
from elasticsearch.esql import ESQL

# SHOW INFO
query = ESQL.show("INFO")
```

## Adding processing commands

Once you have a query object, you can add one or more processing commands to it.
The following example shows how to create a query that uses the `WHERE` and `LIMIT` commands to filter the results:

```python
from elasticsearch.esql import ESQL

# FROM employees
# | WHERE still_hired == true
# | LIMIT 10
query = ESQL.from_("employees").where("still_hired == true").limit(10)
```

For a complete list of available commands, review the methods of the [`ESQLBase` class](https://elasticsearch-py.readthedocs.io/en/stable/esql.html) in the Elasticsearch Python API documentation.

## Creating ES|QL expressions and conditions

The ES|QL query builder for Python provides two ways to create expressions and conditions in ES|QL queries.

The simplest option is to provide all ES|QL expressions and conditionals as strings. The following example uses this approach to add two calculated columns to the results using the `EVAL` command:

```python
from elasticsearch.esql import ESQL

# FROM employees
# | SORT emp_no
# | KEEP first_name, last_name, height
# | EVAL height_feet = height * 3.281, height_cm = height * 100
query = (
    ESQL.from_("employees")
    .sort("emp_no")
    .keep("first_name", "last_name", "height")
    .eval(height_feet="height * 3.281", height_cm="height * 100")
)
```

A more advanced alternative is to replace the strings with Python expressions, which are automatically translated to ES|QL when the query object is rendered to a string. The following example is functionally equivalent to the one above:

```python
from elasticsearch.esql import ESQL, E

# FROM employees
# | SORT emp_no
# | KEEP first_name, last_name, height
# | EVAL height_feet = height * 3.281, height_cm = height * 100
query = (
    ESQL.from_("employees")
    .sort("emp_no")
    .keep("first_name", "last_name", "height")
    .eval(height_feet=E("height") * 3.281, height_cm=E("height") * 100)
)
```

Here the `E()` helper function wraps a column name and transforms it into an ES|QL expression that can be modified with Python operators.

Here is a second example, which uses a conditional expression in the `WHERE` command:

```python
from elasticsearch.esql import ESQL

# FROM employees
# | KEEP first_name, last_name, height
# | WHERE first_name == "Larry"
query = (
    ESQL.from_("employees")
    .keep("first_name", "last_name", "height")
    .where('first_name == "Larry"')
)
```

Using Python syntax, the condition can be rewritten as follows:

```python
from elasticsearch.esql import ESQL, E

# FROM employees
# | KEEP first_name, last_name, height
# | WHERE first_name == "Larry"
query = (
    ESQL.from_("employees")
    .keep("first_name", "last_name", "height")
    .where(E("first_name") == "Larry")
)
```

### Preventing injection attacks

ES|QL, like most query languages, is vulnerable to [code injection attacks](https://en.wikipedia.org/wiki/Code_injection) if untrusted data provided by users is added to a query. To eliminate this risk, ES|QL allows untrusted data to be given separately from the query as parameters.

Continuing with the example above, let's assume that the application needs a `find_employee_by_name()` function that searches for the name given as an argument. If this argument is received by the application from users, then it is considered untrusted and should not be added to the query directly.
Here is how to code the function in a secure manner:

```python
def find_employee_by_name(name):
    query = (
        ESQL.from_("employees")
        .keep("first_name", "last_name", "height")
        .where(E("first_name") == E("?"))
    )
    return client.esql.query(query=query, params=[name])
```

Here the part of the query in which the untrusted data needs to be inserted is replaced with a parameter, which in ES|QL is defined by the question mark. When using Python expressions, the parameter must be given as `E("?")` so that it is treated as an expression and not as a literal string.

The values given in the `params` argument to the query endpoint are assigned, in order, to the parameters defined in the query.

## Using ES|QL functions

The ES|QL language includes a rich set of functions that can be used in expressions and conditionals. These can be included in expressions given as strings, as shown in the example below:

```python
from elasticsearch.esql import ESQL

# FROM employees
# | KEEP first_name, last_name, height
# | WHERE LENGTH(first_name) < 4
query = (
    ESQL.from_("employees")
    .keep("first_name", "last_name", "height")
    .where("LENGTH(first_name) < 4")
)
```

All available ES|QL functions have Python wrappers in the `elasticsearch.esql.functions` module, which can be used when building expressions using Python syntax. Below is the example above coded using Python syntax:

```python
from elasticsearch.esql import ESQL, E, functions

# FROM employees
# | KEEP first_name, last_name, height
# | WHERE LENGTH(first_name) < 4
query = (
    ESQL.from_("employees")
    .keep("first_name", "last_name", "height")
    .where(functions.length(E("first_name")) < 4)
)
```

Note that arguments passed to functions are assumed to be literals. When passing field names, parameters or other ES|QL expressions, it is necessary to wrap them with the `E()` helper function so that they are interpreted correctly.

You can find the complete list of available functions in the Python client's [ES|QL API reference documentation](https://elasticsearch-py.readthedocs.io/en/stable/esql.html#module-elasticsearch.esql.functions).
python-elasticsearch-9.1.1/docs/reference/examples.md000066400000000000000000000224301506101734100227000ustar00rootroot00000000000000
---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/examples.html
---

# Examples [examples]

Below you can find examples of how to use the most frequently called APIs with the Python client.
* [Indexing a document](#ex-index)
* [Getting a document](#ex-get)
* [Refreshing an index](#ex-refresh)
* [Searching for a document](#ex-search)
* [Updating a document](#ex-update)
* [Deleting a document](#ex-delete)

## Indexing a document [ex-index]

To index a document, you need to specify three pieces of information: `index`, `id`, and a `document`:

```py
from datetime import datetime
from elasticsearch import Elasticsearch

client = Elasticsearch('https://localhost:9200')

doc = {
    'author': 'author_name',
    'text': 'Interesting content...',
    'timestamp': datetime.now(),
}
resp = client.index(index="test-index", id=1, document=doc)
print(resp['result'])
```

## Getting a document [ex-get]

To get a document, you need to specify its `index` and `id`:

```py
resp = client.get(index="test-index", id=1)
print(resp['_source'])
```

## Refreshing an index [ex-refresh]

You can perform the refresh operation on an index:

```py
client.indices.refresh(index="test-index")
```

## Searching for a document [ex-search]

The `search()` method returns results that match a query:

```py
resp = client.search(index="test-index", query={"match_all": {}})
print("Got %d Hits:" % resp['hits']['total']['value'])
for hit in resp['hits']['hits']:
    print("%(timestamp)s %(author)s: %(text)s" % hit["_source"])
```

## Updating a document [ex-update]

To update a document, you need to specify three pieces of information: `index`, `id`, and a `doc`:

```py
from datetime import datetime
from elasticsearch import Elasticsearch

client = Elasticsearch('https://localhost:9200')

doc = {
    'author': 'author_name',
    'text': 'Interesting modified content...',
    'timestamp': datetime.now(),
}
resp = client.update(index="test-index", id=1, doc=doc)
print(resp['result'])
```

## Deleting a document [ex-delete]

You can delete a document by specifying its `index` and `id` in the `delete()` method:

```py
client.delete(index="test-index", id=1)
```

## Interactive examples [ex-interactive]

The [elasticsearch-labs](https://github.com/elastic/elasticsearch-labs) repo contains interactive and executable [Python notebooks](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks), sample apps, and resources for testing out Elasticsearch using the Python client. These examples are mainly focused on vector search, hybrid search and generative AI use cases, but you'll also find examples of basic operations like creating index mappings and performing lexical search.

### Search notebooks [_search_notebooks]

The [Search](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/search) folder is a good place to start if you're new to Elasticsearch. This folder contains a number of notebooks that demonstrate the fundamentals of Elasticsearch, like indexing vectors, running lexical, semantic and *hybrid* searches, and more.
The following notebooks are available:

* [Quick start](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb)
* [Keyword, querying, filtering](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb)
* [Hybrid search](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb)
* [Semantic search with ELSER](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb)
* [Multilingual semantic search](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb)
* [Query rules](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb)
* [Synonyms API quick start](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb)

Here's a brief overview of what you'll learn in each notebook.

#### Quick start [_quick_start]

In the [00-quick-start.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/00-quick-start.ipynb) notebook, you'll learn how to:

* Use the Elasticsearch Python client for various operations.
* Create and define an index for a sample dataset with `dense_vector` fields.
* Transform book titles into embeddings using [Sentence Transformers](https://www.sbert.net) and index them into Elasticsearch.
* Perform k-nearest neighbors (kNN) semantic searches.
* Integrate traditional text-based search with semantic search, for a hybrid search system.
* Use reciprocal rank fusion (RRF) to intelligently combine search results from different retrieval systems.

#### Keyword, querying, filtering [_keyword_querying_filtering]

In the [01-keyword-querying-filtering.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/01-keyword-querying-filtering.ipynb) notebook, you'll learn how to:

* Use [query and filter contexts](docs-content://explore-analyze/query-filter/languages/querydsl.md) to search and filter documents in Elasticsearch.
* Execute full-text searches with `match` and `multi-match` queries.
* Query and filter documents based on `text`, `number`, `date`, or `boolean` values.
* Run multi-field searches using the `multi-match` query.
* Prioritize specific fields in the `multi-match` query for tailored results.

#### Hybrid search [_hybrid_search]

In the [02-hybrid-search.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/02-hybrid-search.ipynb) notebook, you'll learn how to:

* Combine results of traditional text-based search with semantic search, for a hybrid search system.
* Transform fields in the sample dataset into embeddings using the Sentence Transformer model and index them into Elasticsearch.
* Use the [RRF API](elasticsearch://reference/elasticsearch/rest-apis/reciprocal-rank-fusion.md#rrf-api) to combine the results of a `match` query and a `kNN` semantic search.
* Walk through a super simple toy example that demonstrates, step by step, how RRF ranking works.

#### Semantic search with ELSER [_semantic_search_with_elser]

In the [03-ELSER.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb) notebook, you'll learn how to:

* Use the Elastic Learned Sparse Encoder (ELSER) for text expansion-powered semantic search, out of the box — without training, fine-tuning, or embeddings generation.
* Download and deploy the ELSER model in your Elastic environment.
* Create an Elasticsearch index named `search-movies` with specific mappings and index a dataset of movie descriptions.
* Create an ingest pipeline containing an inference processor for ELSER model execution.
* Reindex the data from `search-movies` into another index, `elser-movies`, using the ELSER pipeline for text expansion.
* Observe the results of running the documents through the model by inspecting the additional terms it adds to documents, which enhance searchability.
* Perform simple keyword searches on the `elser-movies` index to assess the impact of ELSER's text expansion.
* Execute ELSER-powered semantic searches using the `text_expansion` query.

#### Multilingual semantic search [_multilingual_semantic_search]

In the [04-multilingual.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/04-multilingual.ipynb) notebook, you'll learn how to:

* Use a multilingual embedding model for semantic search across languages.
* Transform fields in the sample dataset into embeddings using the Sentence Transformer model and index them into Elasticsearch.
* Use filtering with a `kNN` semantic search.
* Walk through a super simple toy example that demonstrates, step by step, how multilingual search works across languages, and within non-English languages.

#### Query rules [_query_rules]

In the [05-query-rules.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/05-query-rules.ipynb) notebook, you'll learn how to:

* Use the query rules management APIs to create and edit promotional rules based on contextual queries.
* Apply these query rules by using the `rule_query` in Query DSL.

#### Synonyms API quick start [_synonyms_api_quick_start]

In the [06-synonyms-api.ipynb](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/06-synonyms-api.ipynb) notebook, you'll learn how to:

* Use the synonyms management API to create a synonyms set to enhance your search recall.
* Configure an index to use search-time synonyms.
* Update synonyms in real time.
* Run queries that are enhanced by synonyms.

### Other notebooks [_other_notebooks]

* [Generative AI](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/generative-ai). Notebooks that demonstrate various use cases for Elasticsearch as the retrieval engine and vector store for LLM-powered applications.
* [Integrations](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations). Notebooks that demonstrate how to integrate popular services and projects with Elasticsearch, including OpenAI, Hugging Face, and LlamaIndex.
* [LangChain](https://github.com/elastic/elasticsearch-labs/tree/main/notebooks/langchain). Notebooks that demonstrate how to integrate Elastic with LangChain, a framework for developing applications powered by language models.
python-elasticsearch-9.1.1/docs/reference/getting-started.md000066400000000000000000000101311506101734100241620ustar00rootroot00000000000000
---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/getting-started-python.html
  - https://www.elastic.co/guide/en/serverless/current/elasticsearch-python-client-getting-started.html
---

# Getting started [getting-started-python]

This page guides you through the installation process of the Python client, shows you how to instantiate the client, and how to perform basic Elasticsearch operations with it.
### Requirements [_requirements]

* [Python](https://www.python.org/) 3.9 or newer
* [`pip`](https://pip.pypa.io/en/stable/), installed by default alongside Python

### Installation [_installation]

To install the latest version of the client, run the following command:

```shell
python -m pip install elasticsearch
```

Refer to the [*Installation*](/reference/installation.md) page to learn more.

### Connecting [_connecting]

You can connect to Elastic Cloud using an API key and the Elasticsearch endpoint.

```py
from elasticsearch import Elasticsearch

client = Elasticsearch(
    "https://...",  # Elasticsearch endpoint
    api_key="api_key",
)
```

Your Elasticsearch endpoint can be found on the **My deployment** page of your deployment:

![Finding Elasticsearch endpoint](images/es-endpoint.jpg)

You can generate an API key on the **Management** page under Security.

![Create API key](images/create-api-key.png)

For other connection options, refer to the [*Connecting*](/reference/connecting.md) section.

### Operations [_operations]

Time to use Elasticsearch! This section walks you through the basic, and most important, operations of Elasticsearch. For more operations and more advanced examples, refer to the [*Examples*](/reference/examples.md) page.

#### Creating an index [_creating_an_index]

This is how you create the `my_index` index:

```py
client.indices.create(index="my_index")
```

Optionally, you can first define the expected types of your features with a custom mapping.

```py
mappings = {
    "properties": {
        "foo": {"type": "text"},
        "bar": {
            "type": "text",
            "fields": {
                "keyword": {
                    "type": "keyword",
                    "ignore_above": 256,
                }
            },
        },
    }
}

client.indices.create(index="my_index", mappings=mappings)
```

#### Indexing documents [_indexing_documents]

This indexes a document with the index API:

```py
client.index(
    index="my_index",
    id="my_document_id",
    document={
        "foo": "foo",
        "bar": "bar",
    }
)
```

You can also index multiple documents at once with the bulk helper function:

```py
from elasticsearch import helpers

def generate_docs():
    for i in range(10):
        yield {
            "_index": "my_index",
            "foo": f"foo {i}",
            "bar": "bar",
        }

helpers.bulk(client, generate_docs())
```

These helpers are the recommended way to perform bulk ingestion. While it is also possible to perform bulk ingestion using `client.bulk` directly, the helpers handle retries, ingest chunk by chunk, and more (see the streaming sketch at the end of this page). See the [*Client helpers*](/reference/client-helpers.md) page for more details.

#### Getting documents [_getting_documents]

You can get documents by using the following code:

```py
client.get(index="my_index", id="my_document_id")
```

#### Searching documents [_searching_documents]

This is how you can create a single match query with the Python client:

```py
client.search(index="my_index", query={
    "match": {
        "foo": "foo"
    }
})
```

#### Updating documents [_updating_documents]

This is how you can update a document, for example to add a new field:

```py
client.update(
    index="my_index",
    id="my_document_id",
    doc={
        "foo": "bar",
        "new_field": "new value",
    }
)
```

#### Deleting documents [_deleting_documents]

```py
client.delete(index="my_index", id="my_document_id")
```

#### Deleting an index [_deleting_an_index]

```py
client.indices.delete(index="my_index")
```

## Further reading [_further_reading]

* Use [*Client helpers*](/reference/client-helpers.md) for a more comfortable experience with the APIs.
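As a follow-up to the bulk helper example in the indexing section above, here is a minimal sketch of the streaming variant, `helpers.streaming_bulk`. It assumes the `client` instance and the `generate_docs()` generator defined earlier on this page. Unlike `helpers.bulk`, it yields a result for every document, so you can track progress and failures as ingestion proceeds:

```py
from elasticsearch import helpers

# streaming_bulk yields an (ok, result) tuple per document. With
# raise_on_error=False, per-document failures are reported here instead
# of raising an exception at the end of the chunk, and documents
# rejected with 429 (Too Many Requests) are retried up to max_retries
# times with exponential backoff.
for ok, result in helpers.streaming_bulk(
    client,
    generate_docs(),
    chunk_size=500,
    max_retries=3,
    raise_on_error=False,
):
    if not ok:
        print("Failed to index document:", result)
```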
python-elasticsearch-9.1.1/docs/reference/images/
python-elasticsearch-9.1.1/docs/reference/images/create-api-key.png
[create-api-key.png: binary PNG image data omitted]
#87 !/}#/)8֛xߞ8U7 iqq{0:;U3v\F6ԕgWMMUs5+.,;I ncL?M%4Mg){rv%رѶ#<@,&adgU%u߽G6`сao}дP׭$SGL;4wBE4 ާP/MetL4cM>VS\M>o_anBwqtt94o Q}mYi }⚶ĩl}dV๙jirkԹDׄv2"]䭵T&]Bq57Fc^ŭFE;z!#D&5Po7"=ZOOR=z}e:wm@?y}>ic_H/h|xtIA']Ԟxou`O/l e=y-!1 >0~?n,dg" ?r .'N+P_F6_UXJhD\ry7g~&g>V=ADQ+JB% cV5ѼJs A&k+?wtVdMIm;Zoɩ= m^m_Nixe7X۫36ڋ{u|wOBK2zuBճnsV& ?ՎU?2g~gO:iޒ1/Fh;`f' և7K2Ap/ ťȧz-Gq4( :9RkΨR1mv ~ʋh>҆z!Y">M!?HCCڟ`].k5@ʼnZu|!gڍ5 ƯnSg+nd.ksGȜsY:!,v-AczFmU"`N,>CR[cÁ *EE:m[r|Fx`XÓ0ڈub#t{s}U6X"1K3pTRGqk2  J$`6B0L '0딉ƃV]h -L[hj섻q% ,Sٚ7meK$@S1pRrlj%|Gl Mv qR6mL2M64a ]#P *1bJFw2S3Ѝ/ 1>/n>؅-9tlF 8nn{MЇ1mhF_bKs-VA6EMc"g7!y˧:n|!7%YU_Oe毜κ cC/h磠3on+` &NyDF.FU?X,Y4Ms7Ѝ/nVb4A'o$S6=PU eӱwrU_A m ֲ6SmUS#~G5ulf עl|/c=ӲO 004O m6 ¼m )?3skLkY޴mh^σ!*4]xGg z:, |>/n4;zӗ)Ƨi߄.SeM%f2ӭ(hټ  S}B/ޅ;nCIAyv0M(9Oekn*M,S{1Q{J=@qTF56r20jᣔD9S/CL76FUG4> V|2kWU:zqTY4Bqݡ<˻<FJk܌*R HO7QPo{C6ͬL#=J'6QFy(l>m$䴓Y~44LIC7!9hHCkl %l1ÓY߯ű_UCMU)h&YWsޒ9|_b5w#$&9@=kA!#ay$E34&.cΩzVL؇qh:3MRtO tmīXބ[sLkF?ўjr}: ,:k`Z;wU&fWD+`M Ls ,2.n^  1:Y^p ;8u։]2YԏɩSnm *dҘ{N(kpFC 5u,jjzU:9s<5pϳ,gM )e_uoC%- 4xâ%tĞ?3uW=DUA)Gg=8}Yr1wwu4ɱ(f+'<܉YOI]ԉ-CKGN?G].|٩jlpꔃ2MUQS~e/OÙ'8T%*'~ls|m ;K?g;{uBq].]Bz E{qXqu?:S#JZv'kquPՆξƀn*[ DtO`6zgPATevֽ|10]50P)NdSl0J8qwjm֕XKNG"le(h`7"`+7>SGex bʰT̀Ng;&%vn[^{m4~[~4؍hbd$蓅T:^TZ^l9u`7nv X*X;Jrtuxr6kּHZ,/7~p2SF}CӇC9o$ٚ5u>Vc/Z+„LEL " ,U&#* E=TAݸ=CFf* '0ޜ]cs .b!ajk7 &ހ^7p~j:9>mtgz;dlf쓚\%#|Rz2}^ܞAjqgAAFcA`4g@K?*[1;wQP f̖|, vj͹X3uqg9B4 ȵ\VN79%~^`pg'%բB$E\蘺y7' sX%S”&=x*|v>Xcǝx\=A }> h#Z@Ѥ$\(r=B!ax k &Gq6a^*.Qqc_tnV խBHd i,K2VN$ 1')Z -,[&65ЋDZja1,ulz'Rbl-pUZeU&q יZ] Q/2aĦwv*Hyq:z~|0vfƉ"l*'ʹ롣jMo-D(;(1kX~@Mtʟޠ'%HH _ k{9q*ۃ{:.:o|$CsFxE\dl;[CǨ+?ΚO|߁bi%9E̕Q\|źy3*B6xB[9V0'Gý͔ ؼK|@㬫g4wZSql,%x&>k'izPG7)s ִ91xBV^Fq9t}F7n\ᾩIX1Sa\KGzpďpF8:3M]/ib3"/Z8:xE7NT& 5[{bh4LRւд`ݷnOvQst"(~w7!uHn|-$m Z^;+AFik|$sИ)d=q3:JǧvƩNtX6|Chq↺NI첱YxZO)D/p u3 ^WN8~o?z{# e m,E9'DBtB{i?ކ5rWlFGzbMC4Rq7rQE6> i mԷ8^kۧ^r.,b͚?D{*И LQ)TF ?{)PfT0d'܇>KiruQcxwn]DG9O O\8 q}#ӧ~G|/Pq{*w+\\LYlǢ7%%Tɦ K/IS:.\lqGzi() |]71q|9L_f{?θk\g5cnx^^<KLc+%RM'vPV!BQFQ<qjSk Cqq5_PH>?C@,56J{Tn.M}lc2eޠ?JCKwTg`.OdN"F[P&+SocN,C4y+6MW&{許Sޖ]*U RHem>Κ'[IUG(iNp xZ-uڈBC FQ78mm0hG54n[72{w%cC)ڈ?n~6j'fs1%ޯP9frzI1gt\m{wʹАҲ).hܺ%F(ˈgCxVSi#E[k荏@0Nc$vDN^Iӵ4'q[$t^ݍ?i^O޸,XtŐpFqﮥj:]xMך/C˜99QoVLiRWG 82xֱo??.޾ی LGGgawq;h?PAqQ) ZGjMZ6@<+5V'F%z:P<;M wcR@cMke+99;B;^3O罹Nq~|yn{IFrK^lJUz0m[맧yo4{:rvn0vbc߆ηKi܆ֻ$dK/n 1:__K5(( Rn«! f8cccugTil+ :G "<2 IIt X훱*5NMNzwm 먛$n*c}#%GFuF !ﱃSw X㧦OS\B1/ݥvz&vnt݅]q{UB#Sug0nf߂0'DPptEI^βUq 6~mF`'f8.ɢ+:,J_g4M̖~ڭl|ط'C ]aC8]'Cd 1}+VǁLyWbrkӞ78bZ[/$K/^SP4n >ڭLމ!$ƄB!n#G0afuDf(M%x7ERbVxWA!%0 ŵX$~TMp0Kj͛(.$&#Y腸EHz!B/B|$ B!B!$B!B!B!B!B!B!^!B! !B! !B!Bx!B!BH/B!BHYF}SJP#GE8l5idNh}dV6&B! `pr9[Ґcw{gqV'G:wSQli-B!ֵz~MFEBc}ybQn* l؏jyhbu6^vj'Fi[B!B!ot BF⨲i sC.LUFӍ Qnamzyv|#]IO7STcbD<|t;C8H#5gػCL76FUG/9cF*k z[ o2P=)Hw8kldiU;t8h̚hIG+6]PĶ !B! " &:ʟţJ ':wbVSRP𸆰{9{j/Fu 99["8kO!4curꔛfN?tĞ?%WQU~{9~ÑYASu" . v?j6c\nD6T@w8{9,>jZWWpN_ǽi!`3ps+8#A!B!$^:2c@lq3F!Ƨ{♌Le14ht:"MT(S& A! -Z tzL4Z#8; (Lwjmٞ^Co*:Bpdd4״v„Ba\L: m6>& mNFQ 2`dj~KI#A!B!nrq (?&u3ڬ'hn݂ )ܟGtC4RQQ /2H h%h΀~T 1(c`pg'_QZ-*k<\GfK>f\˭$)dck(/70b}A+B!BK0jOqV=׸ s\Dvr17! GQ,YIU;@3(bУS\lg,'7ĩ>o^a{׺IcjݜѴNFB!BuBRTq52D[@ bMBIe?=#鱰AB*jˡ߄M3c>jOdF( P;"Ca5HHclZ>1תoA ZN7fO!B!si۩4p4uVl.j :j4̾|~zle[iP > a.j`oECNMGp7nC穠 O߃NISkӳB`2v7𩄂83u[ *+%uG !B\ɎkQn E]5"叠3ȑ B!Oa-EهHmg6QSd!`.6^ Ak} Mv8#ZNNc߆ηXz})zP{59*<3TeY:]4g5h Q}LOcNyo4 :rvn0pmʍv ͭYFC(9 6Z:9B!&cT7W=5C|WqƸd,!.WR B!7l񢿔JL BXm0mޜℿ~s٬qԿbԚ`ڶΑ7^}y~@8g3c#5KY5X7?w.:sk?IwSȖ֥,mxsO;aFƗr~ M\y?8?2⤻Y6=:9>i68.%.<]^0 ԧO9/S?qϓS̽o>{5:_~!4z6^㔤d^ p: {iuyk|.}IǓiCgN!gq~0 >|0o >Gmp1JzO|p%g~O΋'3y!.~3ʦgƆ9'7 a:i xKْ]N?O}>y 2 lT:]1qSy׸?^Jby몋kR6M%+Eagؾ1U~愌 ! 
~%ѩ//tA񧾓rdcl4cuwRI[d;Ftܟ]b_.S;&/Y5w-t\`ױ9?̉!Kq?-Y<90$M%9qYd1_<ϫ? 9aoE%w>Ywzo1w} V$r"Xw&Oh.hIi<'Yب_2ᝏ!CX w+c*-_?džky>ZH>V|Eܹ+I33ӞL]4YH;P=xG#ڨgK/}s䙊/g 9rpnn|FWko^w?`7Ăd6a7yF}dV.-;e d^!meidKtTTH-xs5 ~3Kxp5IgS`q*z-L5w/ϡ>f|"^"27-Y$ԸYljpy =\ȶp dMvLt+?]^yBKYm b\Cwƣ!qۑ˹k%\e5ɤ}g)ŋqqRW.;ق1ddqqŸs|8cYH^k35g $r/ye'Tn2ɱ:[E3OGa4vEA翼MPt~4R536LKރ+驭l#9ΓCY5Y7")mrV.gAORy؉oaJ[ v]=r?O` t5%}qe[^M1qW\pq8ҭlsf/&LFB—yg-V\IgV)i9KYT^``, A_νco8+ي42/2y9v$o ~K`dZ:dѲY2/e/L],Zvnj*MRpWD}QB< _r;X4KIJZE =~R2-'N}^g ;bo_zYy碸]k5vmm+;0,ֱe//"YAٲdR-8Bڌf]1UI~yɤeƬi/"##l0rEhd ) >p~K-s[_}}>痥Tk,ֿtƇ _r/yxkybx'ZV "Tfͤ<@EFcS@OG=H,+2-tag;+r\ cEe h-g aP*.E}=+fC;pYyN4ҥjVx6.!Z&e*} #2:>##@dߗmcWa;"H=DwivcV;Ȝ6.M_.ɽ[kVD|ox10]SSKu[ӱ7/c/3am.>&V #sxOɷv9+@98,rdm勵w_&3bkӗ'~ ǛPr¦=ԕb. ۣmavYOwG 5um 55PGqkgKW%nn-sohC<@ImNE$Q=}=ώMa.yS g:qN`fC96l_0iu 8RYɁ J43q+#8VHO8"?}Vr׎whIRǮmi#GӗݾpF*=|`sh#ﰷhkw[]{8LJ:T>*m[~{nur k>  1q6מjަ:K ykc&aȲGGg|Ѕ׷ FwkӏԸ}l' ؽ7Cӽ0>kkJi8#P<_u涞QT`wzKKxs/پG_tf9K?v1y#D;ZK~dt5Nk_wVwh7=#ON|4GGzaj#S]_6j;{!#וk a~}vEÇ;~ܖ1$Ewd>ټNqĬs?oˤkKu^/]lqÈ0ݓLB 㶌dB"<ƺ_&0Vr]q~i-7$a֗(Y g_ 'M/qƨlNw|ȏgqz5 Ld>lpqJlChjȾl!HF_v[cϻc"OaJǑгCk%pn #(>z o[޳=w/jWn`~ݣCTlc;c1ڼq k֔=ԜQ.fκUۥ\aԾo͇[]T.w\g0_/{1ڼ?#7ȃv __KLJ1Pɺ {?]]ĿQtG,^LɌwGX4{D~6\m:®-{;|, s9 ?ykA,56eGn wt}!*6oc#De?;c4|?[7+3.3C@l\Wpߕ}]&fabǝ3ܽ/]trg[{5l+l".-ѧC4{g쮋9OX'f=MHzlm`Iܵb|=oV52ms^_ǘk%:嵌ѧRDDDDD Tmz v#bKu\GRy{<.Rc!)#5U;Yqv/dýO^'&;sܴC퉵 ֤pw;>|~8tsp!t5llw0,dO.ez_5K%&(lX,{[ï+{u_tgKwO̝Ezꭅ<7+ y~4E}{;׽cx-nȦy\հv|c=[T }m LU D֏N)Qc0ܺ 6e~12-8p!27*@ֿp_=DFDpAwO^y>_U9K֮ I𛸡QdXqhODȷbS}Ɗ:32Ey/Y5ɉWƛ?Aϖ7c9s*=nc8\D]{7SUEx=`hiIn1~(oa&Lz _"srvWRk4@stym 1~E~'egTOeKgF޽t V3lNGQP۩bgi@k`vS=E, _"krvIp7.WǓ6r*sµ(}McƸXx Xq3Bb40OKHN%{+>VlSbSQG?(G-ϰгC #r~ڌ28) `ۍ@op?Aջx܌{h(]|ЅԔKzl^m;[l:b3O&d0sMyon]ӏP_WGH @!--.vnm>$ zs;".w8ǰٽo ԭ=z Òn}ZH_ʏуlJ);uni!oQO|Շk=i;+G/ ù `?}knU*]gnxɣ[ pC>9,QªMoPUjRVUW 86}z䭪wc%2dҺ"_}*EDD-}?WBeپcYC4WUjqMYG?l 1M~MZ~>H~|6onltO%mSCkoWQ/|sFu\Ug=)?/ն ީgצ{r[ VvWF/Qrb'/?B~NκzMAUG|opO~]{Vx!ݻw1>9~Ug@5GH߿uTE]C"ނl!o^džO~{NϘt-z <`)};:@}c:ڟLy"_6nHo VhcJ7O<_PGϺ]W% #{o=;;1=`u jNI?Ve;gӑ]Tjvccc]8gA՟xeQV`[G=!s6X:bb:rDn`EDDCj}]C"rUioTVI. ЁDuqϊܒ3!آں$}Y'wyoMv*6a;BTUOmχ?0k+1-|uync;>1u7mdw7? ٺU_oȏ>|MU{S)Qn9;?v}bꩮE]G:c{9dM8]$?y u]߃3p d|o xjj;p;o@"""gP͋! ѮTjWH}@n{A4Wq*җĺ|YGaNHodX]uDZ`"" """"EL_}w`H۸yN<7 {Dv}:Vo[A_J]O[YFC^k!MwϭOOG #Ogn`Cꢿ3=ݵYD  Y2.3gȦ6^fd1uTz0oEߗ"1\̓ ĺYkԍv":ޣ-+=Xj?W'g1u̢Iq|/Jי(ȿ`X;󵲏߁Y}#όJcL~_rIo=-~7I?uOoy0/ψU562:+G'Fw9k,t o<΢uYi " ?ϲL}GgXw{; ǠenV0ןqtz<g1=Mˉڱitgc ex^֍3:F:2cJn`Āȋ??fg2[6vfue@W񜍿 ?}EEDDD..k.m"-{z-GL|u?/ y򛝘Q3_zzD4'=ȄWד>,6AX}}!/À-kynՇF*?fT::ь֝{߫njh-LZkdDtw~n'"W|E:vU VaGaM2M!] L;ᜨ=µ'x:gST$K$qaFV3<07r&z\FNu~̱H5;2I\27{Z9Ugo O'xw3bau1h7ƴ_Y9A^1<9 أd^uQFsן?xscY!XC+kofV-|⻳]LVj4NI"n2.ݮ=#:qxn7SVo?;1,ԃ t乤sM}>E6Ǣ]lmq1u=Yt{%ԢΝǨ@{'(WÄ9~x +ic,Dۍ=^e0b vYeĺ'zŲh|-[. ,YU͒Lnov"oGGgJF&e {}x<^,r#<3#|x4w?(hMVװFvRf?ѣbhv0?}^DhW| +g5}dɓ-Fڥ%F 'U&p_r"gɸ8<ǘbY35u05?;Ge϶rMj䶺JLMͧS_: /NRřV..k&X =we>*"""Wα27^ҿie~,xޯa={ub8.û|\YñN~ݛ⭯'H7`0a} i̛ c`/dW2o"R_# ?y*y. \Ve|\S_G8z0קTfv5U`0ulnN̬dh2f&+ϯVPSI݆͑%3z!""""ye7/"ruЁ9xE2 ν|t VMcJQU8V$?M]``BoSRXԋ޹8gӤ'[14k z)`h*/85\䤝n&u,/pJ*3ViYyy򋗑;TSq!H+c次hJ {F^]_OHN#iz= PognYO;'SʘVr74q%y⛆Yd;L<͌h1m'ra,VIIMFLd XȘ>ɀDΤM?PTj${,Rmf̖$ a@@ }:-80y[̘Ù>o׋={#쇰*qN0c4=FŃbt>كXXpc0v;,c58&3ofޢ%da#{DfFXSY*=^fćp|SKszsePv L{Χ98p '~Y(-.Ő҂n)dG F`{w^c"ٖa=F#@EE kNa ch7A! 
FsFL@"$;> kjbeX10cLc8XM`|>E,9e*eq\L`8' S[;cY) wY9.W9ES2<:g.+1%1K/Eiim-V g^c9sU58&RӒ>~Z^Fn>mi唕SV:9 ^ZLM""""""jz;e89Ӈ,.; foyoj5sh6cn~ 6uxfl)ɞUK'b,}Rkmk҂idn5A KAJ<oJyosJ!%s-0xq2JڋղuoY\a96|^X:i0Y\O5*LE]C"PF4m i͂ eRs$@n˘5)|x K IɚM׏硴;s7g ./~W/oCŤμ)qy+p澈 ,33q禑9g%en7 ϓ̜[H3!/w.gski+s..\ː׋|x5)d 2!W`4*޿] IDAT1i:~5ޠ"!<ɘ>S\ ]AHI3ac462[&xcf9t#w<6 M/d1&yhs ESRI|p&FX8 Xw1?3OSfF"H[Īar/ 73 x j6ؘtiJfm҅cbC/XSSӌ˜?b) XHfӌI_tS˔Zf#IE,t_L㙷p,8'1o3&UX >"""IE]C"Ң]}ç "׾u ೦e/Rf6c&XVRڒdb̈́`Ē:~Φqn<,l̼IxpzxG,lfLFé0lϞAň`&%!lJ~/EXr3=Ŋl<""""""ru]Uu/qN?cJ|y c]<uq#@gVsȫ6 b/~&:lh5^M#FcȚF  ^<^36[辙-ܺDDDDDDKñN\Œ^SRf{GH5g`0<|#BobbN Zno2To6!O'de &LX&e& a1_7۫1"""""" 𡌎id<Y08&f\C^˸>|rr3_ep%z LƤV{[,L),-[Dn~I_po:c6p_F趒a=?2/>Ӿ|<ɘ>S\ ]A0ؘtiF|bƲpd,c?)'@$FMeJqEdze"Sӌ˜'x"b!mc]O3&},Mi^*Cw&g) 7|ٵk%Oy. _bz椐 +t3|w'O<ihhk*H54TDDD>gîSHL^g)x2ďݑ.""""""W\Jpy,iLdNf `eő]4#""""""W5܅^DB գ."""?uo;ULDDDDDDD^DDDDDDDEDDDDDDEDDDDDDD^DDDDDDDEDDDDDDEDDDDDDD^DDDDDDDEDDDDDDEDDDDDDD^DDDDDDD^DDDDDDDEDDDDDD ~齉:ys|^)VFzOa*GsՎʘ"5tj}83 +'9m,K~gN S6S("""""f{P:W|RR\K;%aڜZ#֔Q8zau."""""vU#dtRg93[Hϫ{1>ө5b˘M׸Ek}:|{b |8Q˖Q}>xXO>NB#'2j9odo%yd\֛ 87 !}~\FiғWQ:g"#ZO32s6%ETZOp2 /AsY#my9k($$Wm>3\1g04J|B YEO2wy<㘳fS;~ 905%]I^L!>"r]ޯulf4$h+(. 6(}{t2f)͝Nqҩd:ɦM%K!W}79q0|81}-"ſ9ƋpeﶒmkȱT5eeKᔩb2X<̯f৤x/)%l[5e[*"6&r:S;bsY$\YZʦUpg3u'?+?2ًKشlSOSdR.&d:nfbն7ٶt82'WDv ""_4nNe@2 HmLYĦMN;,&p<i鿈Brf6a4'f%pz 8gf3c293FKcZ<<:'flI"1l_ީoe}fs6$3}Xlf3X BM6XSYFȕvZDDD7]sHw"? LiӰP|ƻ 8]=>|]I f6shǚ:&KFFf 1hg00Mݳxo% @[»/̽ ^O%A!-Ƴܹ1 \S] d[CWrNևHgWK $!j\<>H&a$+ˁ>p)ñ}u3NLcEDDvqޯI*)v[H+Z=g8đ:J&~} BLʯ/PNޘ̣HKNFj/޹q~WѰ} FKؖVNYY9e)d /"Wuqr!""" #K_C|WKN!O$5e063N%vC/nUwvKp@:l,0ųNn\of.ܡ=OzN b4F7/KVsxpXNoS;t:w9nza57e0f0oi >ŕ䊈`pi݈l[*e^~oysCf6i09)yq9`Jk\iR8sP6')s[Mv0>o5p{aVR\)K^LÙ0o6~?^k+q{xJfS2pX[֗|%n>Fr˙χϽ&bkFӍZC;]\Q:l8">/0ɸܑCHNM!BZM/tz?s2:4)NH[< _#N9MI%yhyd/~dz(仇~ܙFgOAAgIIsv)i$?=9Z$[y~IbY| y/svaY ؛0+#)%1oV4:c^C! 
ZKg6i<$m XxuN5]}çGÇ3=2gh/ں|YaN?s6E+)vw/ZJ MLL'.Y#Fc&IUGDDDDDD^p/tBDDDDD䚣.""""""" """""""/""""""/""""""" """""""/""""""/""""""" """""""/""""""/""""""" """"""" """""""/"""""""RwI`޹7c|(Hйx^)bedWgYDDDDDDjOwyPwW< !!hOrѵVl'ϥ&"""""" W6[7$y0%4n  7cJnjkg J~ v>AP; VȜdgPt0·z<w*e;HFvQLeӪk1(w.#N91$n#""""""_̈́),{+p'pxIL(}{t2f)͝N0e[N2w&mI[ƔXIgWz:J`?<;[Ɣ%|d!8]L_UƶbDDDDDDHO^ khfu5{kF`1e69`2Na: ` ӝE/`a`,f3V4.x:13- ق-c>9Hb6cO ' ONGx3'b70_DDDDDDaWk`p]0ۦa'at `ײLлLJϻ )mǒ1Y},iMkkoKFF 8dz&>h_C4F.xTwY¨G @)c[HnL+5R$mao{A]$ۋ}\6Ȯ(WVCiIj Nu@` VTDrHh- ~=|#I$O<#HNA2{4X[Ic;]S[x+"b.Gta UE'@ xMI$Ik=N_b ڻI%;)fHTד %Y"RT8XZZޮhtI벩&'q|gR{ Ä>!dr8+$I$;6=Bst"c8HA?koקbĬ I$I:'IVU"~׺F{}+s᝿4~r>;a2W޽/-I~%޽/ry_޻"I$ ^|{7reC4l|~+/=ʏ;v>uM,O+R 4=~ϐizb+5?˯j)y{3|0+D$I9=]p+@}uw +C_+?!Z733F_LϿTדuּ~\̿Or#E+A"~ z.sjߥbߺo>FE m75*WO dƝws}_;s<>4GbCU|i06nF~r<&|y'< +.^ e%_n;6x p_I$I 1.x+~v}q'F ghW:}7 Z]<|Ǎ|]R]4yH_ƷxwCޓǠ=86u~s>Q|xL"15c)_.` ޴Bj{> ōgw3<2 qc-M Gݶ=9J,svN^7t sy.fS=7Gc8٢~VXFjM$Itf_#ÈB29+Lr&U7;ȓMik>a% Vv_h5ۛlmҷ>0Cyj3| K.$z<|cşI$Iޑ4 [+Ȇ]:x>Q ϋŧccs($0.q.p'У,|vusnx;ؙ}F4' ́:6`W8' l˓$I$# | 6+v=WR0Mc`8F=}|m ;w gktvmyL=ӯ N~2~XzɆW^96;$I$IyF ӨG?yb}':/7~K_>" .~z0\5|S:O|y!?Qpw>K:s;ct?w|i:#9pOdcߎKݭ;$I$I'Cm2ue}8V#ǽȑ#>1p@+J$Iz:*xk|}gL.I$I2qvs~a̿:$I$7B/EB/I$}p $I$I2K$I$$I$IxI$Id$I$IxI$I $I$/I$I $I$%I$I^$I$$I$I2K$I$$I$IxI$I YjfF&}E=-$I$Ij$I$IxI$I ,:!ȑQ&̼L&ˊ0:q̬XGsKײ 55 >.M멘gWHiu-^kI$IԵDi ̫jnei:24S]%zIDATnKg`vE끼XoHr"hʆ-OeYdΫ-I$I:k^@6Grk-l!@%d sPya̯4LoKA^2 |"fzUx%I$IS)CfN PX3;hiwM.&#З`@>%%AJK4L0>x!/$I$&'趞B>_$)[P5%lH C _E`q[I$I4,z_[O%I$Ig4>D4"oXi(>H8D`( JQ9"ٮcߏ9rN[[$I$$$IB@$I$$I$I2K$Id$I$IxI$Id$I$/I$I $I$/I$I^$I$%I$I2K$I$$I$I2K$Id$I$IxI$I,533cF+dz2-#GCYi$I$#z} [[J̥wI$IΔP0ph,*ouH%I$I:s}d,d~6R]VĄQF3b=o[R(g(&Sn3 ˘=5ȑQFO*ay"$I$߱dK0R,}=\ l^hzs؝YGY=Pt67@?@dMyy#5lyDy%I$IGt*JRSDçVZ.Gta UEG@ H e $$iTmO%I$Ig; }xHHd O!W[!(͍OZm" PRh4D&`׬pY29䲤9(Y|7(ڤSK$I '-_X3T#ΐi 8c%s$QVLtjK(_&$1sBi Z„$I$”,Kab,^b|z!f/#YڪN_p\~[F< $\5$I$'x pkz1z@OF^_z/$I$I=ȃϴ=8KCK1nw0fFw6Y_g0`Klk~g}\?G ?Bzۀ$I$I7 }l<З+>y.p>W ?T;'_O?r܌Ke:sO&` C)QF˘!!fCe$I$Ƀ]%v&mKx#|[͘`8o|fL83r1=-WWu[\8o՛xg^₧_&GOu{Z4~O=ȽrooH$It?QIu26O_̖alwS vz[ދGes(ןO13:'@\1L{O CrWQ_z_DsQolE$I4}XtxnzF~}Qۗc.f,?w@g+Vp_޵퐏0c>H#0s=ԇv]O0û$I$qZ&ҟݗK|ߵ_}sOWB{O)Cz~s0>y}]{rE[O!#s=wrЁ|1K$I5jm{j}~$/㷌bU3PGGǾ9rا6oEI$IC }Mgɷ>Zk4=ًsp.lE$I%w>z?~6cl:C^$I2K2K$I: $I$I2K$I$$I$IxI$Id$I$IxI$I $I$/I$I $I$%I$I^$I$$I$I2K$I3*g9a{3r\!֓ɝl9ʢLN!)9q8.[+2lVmg)P@Y]֖'I$I:<@kددP5P&f.m$.`a= -'ӵw(,Fa4h˓$I$)a0тYT2 ֑x|AmxLݏIxWt B%E3QM*lM-)0EI֓9nA4EuҙM,/-b¸(#GǙY~7c;hbB!K꩘^y S:́lM KבX3IG˨XO:׽*pt)+dጞPLE] -UuokXq7Kbf#;)V:$I$dK0R,}=\ l^hzsuEasT\OiMmw."S}(}~jt e h\CՆ?|-Pg}=[x$RW:׋%:}7xjmfV0|[tdװ'ٰ?=ݔCK9vn7#t[ٰI5 Xٲ%I$jr&,_O` P2ʕ( Gx"";=u5w)Zʒ|"0T.̧MO׏M$6uHH֓"~\Hjp`8(-n7ZʼX@ HVn)RSm\ L# 3:‰;{ o`a$I; }xHHd O!W[!(͍lLX3D4"KkVp8L,Id@>EA5멭m"\| 7ӼD*Wi rќ8.J4D8z[!m; "{7.BD"AɭǍϤvtB&D-a:V3neK$I㋘kzy=Y Cqu$2͛X^tuG޼}d2ʪ7ќɐNbv k'^h '*YP\t?N8چlTi~Jyd*YL&&&&”̛&V+:zIKuBσ9˨i"͐Nz*S^"͒NSeK$IT)Y

F(iN v28Photoshop 3.08BIM8BIM%ُ B~2 v" }!1AQa"q2#BR$3br %&'()*456789:CDEFGHIJSTUVWXYZcdefghijstuvwxyz w!1AQaq"2B #3Rbr $4%&'()*56789:CDEFGHIJSTUVWXYZcdefghijstuvwxyzC C   ?{`OJڏJ+L@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@e}zVǗG@zTj=+S˥ @ҟUдMhiPpB" W y&YGIWirAlfb1k({IyT.H_&Z[-Wx-)EwN_ZW (k3R>GzQQ^/cugV=79L_o|G&Z[RGO?U2獿]jo.J(UcU6te(t>W-Wx- GCU2獿]jo.J(UcU6te(t>W-Wx- GCU2獿]jo.J(UcU6te(t>W-Wx-ҏj|]rtQ:?ZǶXŨZ%tV=Ƽu$:Ѷ6v#ǵ(x%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TP{}v}ݾE-(_q._d3]/t 񖲭,Vjp!jy>4(ŵ-ʺG?kG_i.DhXe,#t9?fCmAEY `eYBH :H85^uc}^鎒YA'PPTWF|!6VXmdȐVך!4ci)f*`)JJW֮Rnޗ:>'|Q*=}ydgeHT i?< ~"ҵ mk{ ҂:gV߲wÍK+__Z@s;21bb;„oе ؽ2\2F|]rqzdXiyjMթ8$+k[-N(gتxnzpRTSmhO3 zy_+=ꀖ1X;T`RxAӵ=PԬo i{{ʍ4{߈)?;έ~D JY%}3LDTcEiNoY+ݩ/5.M}}g>1R)!-<66):Hѻȱ.vsŠJ;jU88I)eu&|vKn*ri;(Ii^ m; >7zAuL :Y3pd•\ƻOx^Kĺi'y:AcC9kھkK+,]:̑ r ;sǍ4h=.ca{|HRUs+ơ8٩BR[{3wemb0> ){G1oz7U.+=OO>%u+MRԜysr3ֱ.#|>6 as`|^9>7ɯ^_=lufUHc$W,ǀƹ/wy㟉Z,O׮%\M)y|bnMr!jUne;r6[ SR$9J>4W̮Ko,"ӵ? ]W YD"ѡ RpG#,|i_|'MϷjS]QÕVUbDA1׆|'7/ <ҟ}㷚HP8y$F2I<ҼAw{I`PI;Z8f"NG :,WZ)rRiΗ,Fq׷KcEV8 uT~misOտl|Ij@$Wм>#ֵ/-u \Pq.#[t~BC_|G ߳ƭXEmd:+u-} Sm k-޳ ' 8MWÇOԓ2j2rQp+JR'̣xzIfIbigmrsZ]Z~GZ<;]YXZsICuq2]K]26]FM7xIm-/[U^LY|̓6 J~*]@D{G2. pG5>y/?$uzu͹->2g ʪB7sY爛N4_jI%OiS<ʢjU"f-V>¸;]JIb޵"GU$ݵc 5izXEhSbf@ WBT1!~鉩i$쮙.%] `R%~l/Z^G? I,iSPMŞ߆H9EElkØtK QtK[3 ĘWM(IOf۽;ߢVvvޓ/"֛rIMUcY5"_2VֳÖ]SU[G+ HQWo uamzUu[9."(ԭ%ą9v$T8 rI'e?fOk=~/[YJ<#Fuf Nm>}f|X|FOM-gUJ9֓Kt[Rcx]o ZKWV(lܾX}ۆ9\dw_~?h_ kmf_-DZ Θm38P? ~h-FVqy #nz+o?6G\V-_ aKqqZ ZQ%&8䰍cj@=)e6]RThO{kuyuӥZQH:Z圭mVV~Z3W#ljX]xMBС8&呞3AxG,ִ]`71yyFĮqz'={Z<Q|+|7޶]DJ%W ԮrDkx}坦2] qxo?z{\5b/xEDmjmg~Ȧl1k{|FdZZ^-kHS Q*'vϺo~&9t;__Go-:/;ۯx@w"ҭKUR8 ;8pv>/qoOQ.azJҭo,[cq{=jOǿů[^$bLw6¡ ;ġٛSkz6ɮk:l(Iys2Cn+@NԴb SI5 yXeC#*GB .C69:X4+dFtVO|5Ğ߈)=? 
i6zA/RZ|G[YNAȤa} ^SX)G667Gg )4;cIJ.1ȯ Ϟ?'%7k+dXV)'W@ AQk58|#Yďakxd\\ysrpFo*9o.}.x~ i-s=MqEDO@F{^|Pmjh*ѯ$b g W,H#_zwu >Фke{ygu~#>iz7 K+/5ѝ 3LăqPH2htlڟ|1zFXj;kss3\{ʍ4sq޲|? xHկ`UU85@W8/|B͢Q%st$fF,5j~ RďjZͷm|d\.2rp[) .E{#x>Cm׆PȄp) N^%<"_4_?PؾN>Q#)#_'k_~:WU'5YV)ZܚM2GejLžAoK颢mIaKϻq;W k!|Ckk6dkc]h=1k/ m+j,ZfeՊ %̑KwXv$fOxoJ'K̟i Iyoew`: -K{>s|F|t=lxF:vqK; \G<mRNA3/5ji$ou[P52F#W,6/|W_û_|ICl*ťwqk?!$Wc,hIk#Ck7ᡉ$u-b |FFm>%ukp[k+gBGf'WJ+{{%dbN0('\85 7rd\:Sq=?N$ފL3(e,ʂ 5 ]<YN+mJD{ iHŒ(Ucc__ !|%uhkWzŌo386E<. x9ſ1o]OO{{9;{4NVIt vUD;haIJ:a$i@>q#?%κ._SǾ5z`D9QI[|#{?/tX[\ a[0&7Zq$=OȣGP'k<#:QE QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE(_O^,nK`u*zוeNKVPos \~\0 ]S؁]%ө*rSZpTpz5X; <+Ὴ2Glm7 Mƒ nryB_>o_ u_>"խ.`|^/)u‚ jlNZF*lݒaB{8);'WozXGݏܽ _*N&D- Rf?f]w[M~$@|Q"dJ{u 'J&I[QJSvFkOuت==TRWB/ƍOwîami(Pk@y'ͼ׵k(`|qF ;ROJ'OEӱ!2Ls\߄t/xX]z4̧SWUPsQO~1ꏪ4#LCN ="+ӄb>rrnL+ٻưĞ/wěݵh]7_[^3`ܝ2RM^I5c>1Eb]ե(mٟ9|8_ޡRbD9w"3H۶@Z> Oυ5ۏ xMbed マ 0[<ZKZݭk[1Ya4 w{53¾1kŷ-fI* [[xQ;.rI?+| e5xZͯoh#\[$AenQxG0Xs2}N:xqݜmo+yxT$'ܕ~k ]3OqMϬ%R!p َ\198 ~O#:9:V&Y|e]hNݨ6)Tյ.-vz%dvɰTƞzI$ջoWe|CS<fyz@H4F%h6ax|{烼D~iSE;=J!XVT!1޾^;g%'~}Ǚ7\cz38WZv+7|= R_ow?G+xڟoQ'yf1Ӟ)xേֺV~5.o.-XD\mr7|A^EgS5M-_fܽ43JyV0yW 4I'Zo'wۦ Dx%ϚrUTsʞs&-x)*rd摥,[8WD9ƫ}#vZ7s=9RP|V]YCW.//?6CwR۳̋JͿśMHuy-[&N1)a9MO0!k(l>kێG+'[:qNua~tz],Y⿂<9n?޿0[GiOg޼L/e篁i-Y}oR{ǘ>@N V Wz!^!+Ɵ?}oyco_ϟLmE;? «ּkkiaIyf?|Ǖlf-qF] Y!S" `0yR}E 0İ[ƱF @rwcxO k ;S"HU#E|aǞ(</-Y !ǢF!us“׃?gB|Oxu:Qqܬ?drNgGD%9N PH6߲Yt_k{Y`[{#7^sTUn|,wO~1|qca471Kk![uD"xݷR #פ迲ƽ}? _xIiv7U D*UHʟYi"Gx2˟BFGSSAco~ Ň}aћI6w27F?},٧Sz_5& []9$[H\?7͒6&IosY5[Twֵ|[U]ǖfbI'>6O1T;׷RC$V:!!i|`OHeaA Aigga;xWq D@ joV^7anC~ c;]B[}g%cs|I5|Rg˟h6 *: *N!W_NBB ~ ]=u7/.+@p8\DžBsQkmᅳ?f&׾Z,kD"? q°!Q-~f (g[vO׹ג?{]H5(7\}zD(@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE( iWY7=Lgn*RPw3d%s/K[먭[KNG/zKQ^=n?+V`/9Bq}/G!z_>[괿 E_"(oS/K[먣땿>KNG/zKP< sYykV`/#X8Q|vrx?NFIܖbYrI䟹]e:Ӈ\B9Bq}/G!z_땿S/K[먣땿>KNG/zKQ^=n?+V`/9Bq}/G!z_>[괿 E_"(oS/K[먣땿>KNG/zKQ^=n?+V`/9Bq}/G!z_>[괿 E_"(oS/K[먣땿>KNG/zKQ^=n?+V`/9Bq}/@f&I;^먣땿>KH%! _"]sҸ|+-Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jd}}?J>Ⱦk~y~GVG@jcUUrEob@"Λl3F}zT:gN#Z~_`4F?GeQRҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJҏJ ZJmGjyt([:Sjii6}fGڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RQ@ڌ{TPx%=ǵIEGj1RPzPW9GY+3#5tQEAaEyğ> MO el[ˆ5I%vEײX|)Gtxn4Ow{Ɩ}LsW[4狲|j>M[_3;p"wk!7OO#wiVڭ};B/?f.ga$Q\N.jI0v:仈ܪI x4 O Z\%/~ҋ26GC+F 1ֲ1T{-2g%﷙TW?># Z׬u筬vv:`3Zk/k+4*M&)#hXArOy8yRr掭Y.ta#W.Yh՛S(,~1Nj4GS9 o —h&?!n>MU|4?蚦e5k;i-G C `8{NSjy0x9cTN駯emߑ&xO74 ^P m@ZH-0썒Jz dE.OhpH'hb?t?Y^1ԕ(ғwI7oS8ӍYUm}5*(f-YbCC++ 9TQEQEQEQEQEQEQEQEQEQENJ'ݬk·>c^t?tD? 
f= ?V`4AER(((((((((((OٷHҵ̖ٮXomH1ޚWtW~|i Vk6$Q!A=kpQE((((_|QY`6[`kasm%Eqpe^"*gIQ4(Op!j^;ݯZ3\IA1j<$ (_1W|o5,r 5<>YIZ*?S+Uw7-QE8QEQEQEQEQEQEQEQEQEQEo Rо*KL- $K8eG@LH.~:<q?7]W.xņK=N-?G7rBIEy+F_aEWGEPEPEPࢊ+)CKHzNK.]\g9>4/KgltrKo"nXZ8 bGP]{F!Ӭ51C.ڥ$@ʓ͸ wК#Qp9jU=m%im{?a^Np!8U>٫;};󟏚E:wQQޣp;أy1fT pZOGB::.݄FHayf$@ k^|xW^-uVQa&k[vePy&?zG|*k~ &oqi յBYm9v\oFPv *TiTbcA:zXݻ&v#O8O!VIJJVJ'e{?QgOjzwK-M5%"KyF.Tt.O~]Ga!Хh/k J_mMgV*HJ͊/⟇_G|DW-]5"IJT,f$.  ?gֿ|WΏ?5[h.!N HI9^\*֪)WW/3KvLyEk;FNIKZ[3-ڎsuom,wlcjQcp&X `c!X]kڗW76cdfU |<~麯u$^E5`f!P܁7?_h.kn\KI%F<;#K W.gom=m|,U184r*Q)GI8٫0ȿ.>x_Ao<1kqlǟ0x$-y4:-MYu4id(#igpz$_:~<>xFK Uֶm:uNKI, 6ޤ֏?߇qў B18NA'YV2봴5ӓ]"}4,jٗԪ q /7l?{=!lR;mS#G3d+#p1?||G/t]1p-ܚ}e1l9JIH?e~$!|D[UN66>`>g{.ݾbgxf?< %fXs!=ƚB"ǖv$O'ui=K|Quie.ZNaFC,(Wt';??Gol>oWy >4h!Xyn^4dk7I^% cS.N3 0E՞ 4\d:Yd9r_ܲ`c\<=⯃ 7=yei,RAo%q"cF6~!#Oxx/Vhu+u09G-*9d0  ;1k_Yԓ>t[ "+wyʣ7ƒM|`#m[ؿI}U6|7]?iNU`G 8)^WN+%swVҨ 8|~v>`x_ş KXo>YWvw.J`Sqz/^еJ5].uJx8vcMȫ5?G|]OZ7 vj升6@'O>5SE(&]O/n$E l6n]/oyK_uy>%eMeLK{ҞSΊgyp;qG翄u|7\;1(L6[/Wk[n_|5Rkx?xN[8<;E%$V yԇU[hals#|&wrۻQ7ܒHEtl.+ /h8e)m8+{V꧰a0N*.7oxImsv? ?oj s <8l W  qf>E-V/uuք9Hˈk['5~Wʔ ҡ78;Iߧr8Վ_F5IE^+ev~EWzEPEPEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( ( +/(~-]>xKrHxQ@pN  dLJOĉڭ>m3:yچ  t#M]cO/ c?,'6Y-WQkџUTn }w~u}f1Hk zwԭn91bta؊FV:?+ǿT?蟳߆#[I)Ux~Yæ\u"fFFǙ\rb,jy rthrcUn$}O4JLnhypqK;/8Ҭwvmk{Uy>\ ~~'5ٟ^<m\qCm!6+sFhVE,;ʛNjW>?xgE"w"I[97UI}c##*E2ĸP`9qWϦ($;?|HѴX<ٙdf`nYQ@,FH#0>%kIZYiJkhդi1jMa9$OUqwN]ۃfc#[Lr{m^vҨ9(+¬JGןU Qi)h=~\`q~_w {/֤ӆe+jΏV5jc&^~ٚU᛫-F-H&h[B[F%b k[To4j)3Jk#/aҿҿBi `M-x[Mc_f{mWYVtʼ񣡞h!݈mć;$Gm~(KCvndrt끦8UX@ÊғW?j+G'Ziѝ>*7~߃j}.mtiv}CI/!s<[C|;)RkPRL=/Kotu8'*ۼw*w1ǜ<%<ǁ-Mo@t&k[Hwo =ܱ07cp(O9E~'Glo~/ђV1[\.o%-JWˆ5a9Lbݏ +kOVxö0]j&]+BRj?ۣӞ+ϣ:tZ[[YOkwٹcqWḘsSO>ǎuW6Vn-[Zc,Q9$U.3%Tݹ~SVxVUdFȩm*9bO\gD|Qύ|sO-]fu[{NӅh9e ~SOxV_)xlsRclُ'~:kCmMj UXI $aGEoJTW$)@d_WCS~͑<6M%ϊ/aYXf4#fWY9ڛvDbݏ:+ lڧY[˿^%EmZ=yB=⽧WuFXq+HPʺ$x% <h~ҿd/.Oj:uՅP)$0 + ?)=é>x_.;{ij~V~vq f۱W||]bm}^*f+y\F;t}U%湿Ro7nu'| #}OJȋ!vL^O:?J+V3RR_pqdWkH<zgU[B-'h֬!$`|}İR 1f$fqjIVׅf!ƙiFx%T^9V9WӟUc;_a~_/ǻ/> ZhG(Zl6ZyfbIߞzoJZ)k~ 5rkKR~wywpII,rIɯbWPǂ>:WZNlYR'{[,[sUUHrQEfPQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQ_`|XQE|c㿎,{cKżsxFn$EZy˽UU@%2$ʾʕoDݷ3 X:^֭I%m}E|_~ؿ tI|q-}СuQ m3}'s^Kxť?͏WI1$?tc!]XtbRq4\.q V;])[U4Ӯ=ϋ t*[fuDRcm/ wmapzc/[jV3cf!# Gc^laiťz0Ra$5h?~&;?MҮeP$Oo1]."Ӣ }Fq-eY|uá yҕ (}zr.eWů/YZigl;|f;ǴҺ x2:5[=^[cKg2̪N TաiA M)K2MhYxSgن ΐFXP@,{ɬ |DH# ;W$V),$b ]mv7%gEr׾8n sOk8XKURŏk7ß>QHtV0mnB̪,#,_Visrs+k;+'}RI Ͻ 衜XY?Gv,+H.RYQs;sHJUQ|STܗ7kQY{ /AO/W=|㑡EW{KGhjZ<cFWNUG1'V6Q]^|DMbft1B͙ea-uW 4Zboj\= ׫4&]MQ euw^mz~૘':mP$)$N0ӭ$ zE=M?Fh*z-akJ魷iT֡E{.|T<~m$O1D SE,9*?֟:>^sńl>2XWYK^hE;8fwz`XtytR操ԭdѻsl>6 RN&+d%QQH҂3k|Kx7Y/R Kmr7Uaʰʰ+ءVXq)&תNkxcFQI6+Okqtx>K܅WHUIGN;k㿆ZzGk"e/F2c Fr3  *)5tmw{b2lM֯BqvmI'npWx'Zwǂ1wXe[hr I5|px\w h5ntEp$BHRD#r2AƔ<J N]ETW\NT%M'rTQEv!Ezn#~gú2C"j BʐppV|Ur5$NkFQщWB[I]=SaEj&M^Am%/Xm]I#t~]O~)[y! taceBu!׃8 ~\.?NʦT~'SR?lr0UoG3>xᾏP<=^:uVY<᤻Kefqj_xW6g!aL6.m^vq d#&ҊgQJww!˛V­˟߳G TԿ*b٣j__Q[fM@\sh;*%WY/ٯ%. 
yiL ywpf*]BM*{ZM;om-ripUOX{8?ዿfU߳G TԿ*f?./'Wɿ .~'SR?ዿfU}GE6qAu?97Sq#b٣j_G1wBwU5/Jf?./_&J.?| ]4НMK.?NʦUa O?ዿfU߳G TԿ*8?|) 3Bڋ|;ѿN!W7>g,vq zWVTurݶ}7zzl-55GI%eEVEPEPEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( (?-_l7z_iisw m5ĉQ",M6LqܯTzc&_@?jI &ZC?M?_v~%;l[~_>OBc׵X!(Hdnc[>o7%^ u{[&Aʋ{OOZ_$'ƒ|@g=Bm;b>y20#Ӹ=d~ƞIMGƞ'᠅`G,q43'U|Aٟ<+j4Vvdi<;Q,p_cڗgY7YSZԹ6?~ {z/½^Q-' H3"k>x5#w_qHkiu+WIYBdT1?/RoxOMg|w/=ʔ5Rc|,NѢx?৿o{DRz'cHBúk;YcFE~e# k][3Oǂ5^>&XmG<C_%۾Q?c_ׄ0<%L*\zڍ̸Hd7X ,^K=zHT_wve`7xmc[t&oa6qu4ڥ '!FS&#v+d x4e ݼY|l}XӜ*QV?cw i UHSam p>J$R9p?jc&__t|ya'W맞\Wi[To4j֢У?χ>o=+~.^״x{K>'a[:<1V|FE&֮-ŭoFd4/t-#Q>~DC/\ڏ_νVvq<QFTI$J6l&? ?ۏ_&~ŧ Һ< ['ҿ_ m |+u@Mzͺcz\<߭8?R<MvͱZ'+~Am~[?hMj r[FyO׏' >]|&׵/F{a,7wqݽm'iAk+0O˿FģP3GM_^*_X ԪMhHU+U2k_4(=[$I#m@_֧&/3VИ+ 3xÿ~G=ܾuO4+mvFM30мb?#_֥*i~?mOWSxUW$!Ky^HYB+ CS'~;6yϓv}3;b8͍cn|dz#.[}9흞^U>%?lz>)~gul0XHQd6v5 4?')i0@. D2Y%A=O$ש T*TRPRv֚캞^.؈קNQ8IQzx׍xOܹ9Gim V5G]rFFdi~lN{ MWxFec#0<G/!½7LFs q}R4,˺x Ebsc|Gⴉ']Zƪܟ"K?~Z"ɟ< k;9˹ 3}1| Ь;}h^zZ,Gd+ϑN9'z NΓRվj%Y)_(Hf^bq@8Uiʪt=}.mno3XlHjOmc:5t-Ѿ!xoм%jBq$$G/y`| 䪊ʯe>7p|i %ş͈yo~}6"^i=[eb0}\<]KZcѭXn@HIg99$$X5x猿`9Qu?~|oO_k[Guoj`dDD$'c |IaO֚ao K^)Z b2yJ*sQrT6Ԛ^$*7N4읚OZZ߷[>o߶;6FI Ydeb"!Cϊ mD5/"F)0UߕWc|I[_o,=8aOxMV -^5JeQ̹ '_-Qϥxc^\-'24*bS;A$֧ ciJ<۾[5ս=>G<+Xy{iUn2s]>nks?C/t KjV^ӂ |WFd߰ iD:~xWXT7徥mD[hK B!xIGz'/_ 4O[iR6 SxVB m _;ů|HeoijKaeu]2)g3I'H~eR]9&ҳi߾3EA׽mfoĿm;1Kyr4FxYu~vSNφ 1Dyw`3O|@~^GN|::y^܆IJ!~#n'T8Z^{?ei+~Yh_mNVUO{[[sGUs_ :5έYۏgM,2;F ˼ *iD:~xWXT7徥mD[hK B!xIGz'/_ 4O[iR6 SxVB m _;ů|HeoijKaeu]2)g3I'H~e|=<4'NI4~'9:jJZM{ݭv~QE Wi? x^^*EyY>9QEx9xƍgK&/P%4k?Y5y}t_TyoKa_Oھ3$^Ř4+acd̼}>c#z8TI~wW/ dẐmA-@?5tnN22>s1o~ -ʴIJVYYKu0x|C$F2M$'ÿo#V:K-ϔŚ!@>ZJ+͕_{oB.{] &ݕmee<[6"HPL#s־Bxw I5f]5՟5MkėvwR n.T|qk+1/'žb j3ZVỻ"+i.$`$ tx,*tԍEQp\um[s3c0̣RFJ1ŦJ:YZ?IWS@>4=%6XNkڛ>. 𾮡oc+ټFR|Uk >)#RviRC,qRxsJ,V",uR H#fT׿jW3x>i #KK{<)EJF ;Al}MEJ=v#X/a, ; m4sb[ 0 Q| O¸<xgxdx#IGn.1$p FGM˰|$V۫7T՚ޫWycqdjѮ.fOr-Džjh˨Esuw5UO)XyFX1#9O^x$.uo6F0KyOQ)EvUwxsŭ7_Xt;_Ib("OoŸ W>}V?g_-K'h-ċ¹<VKõ0vSV8gѫVͥ}6uToJX*2.t-˭s-6/pΞ]2pNv9tZRԩyJߏ?\J"o2~=6e?"x^+OE_ 2eI =E-TҪӖڿ&O߄YKi;ލ .ٳþdk7:hb^K,g̷X`~V8EkZIFLY\|%?gx;QOk,hXqn{yY?s_I7Z7JFQ_e /_ xfxm.!cOn1+BGb<ҠA5.*Q_ċtz梱9yY#E_/+|<=GNŹH&#B HKox_hEYU$mdEWF :'rY'=$mg(o9Ua2)~\SVfrNG_XsVԴ=W.6sqp%h#{gi#Qj?7☭%;@lt=>fV8 yJ %T*-s_ ZxWI,%ުY[B4A_i|_`_1:l>8ڼ"R{]=b0`NeQQ???c:o4i4W'?nt+y.OGP%ֆn&nb8?__W|kE5+}*o*d x6I%FU A5%&U|8޵ңQo۫dno I(T_/O(°xv)ҵEʹK.lGS^jfۛ;k{BSE)ǤS $ۄ_-O:ԍfEvz $_֭O[]o.-Wukgw56{YNHmRB*!Bddӿb/7~9Y;h>RMmO.%I]$C_3h>a[!nb2JyW,ލ 5";yGҊ? /~,~~-íj.+Y "u[To4j8k%]gZq+ d[Z`(NY+a"%?(gG'kZڰi0Cs$Qq'4J`~ Eyn񏂴^XKUIot(:#D3u:l}/ʵԭ'O!G[ =VJ頂Sv9!J:0Â< F߈~i>ۮ3+;e{]H@K n$$n̯L [~&|-t m2&u={T+k jvIfZI$vm "21"?& O }&t^FʋaTs,ٳo'ŸZߵ/|.[F@]8t!A+J AcnQ~$J]<]a-LDI[̱3ʴd1?T{^_a.!˪kwv%MK{dHێ$*aBaF $fɼN?۷Es^ VxQSqxGNf[r(/|[3c“a֩B [GT4h͠C ƣ#xkZI8[88Y%!te?~> A6~\η:罒0v,"=Y5qA|<BXkm.D>)qqR\[gmO7NdiүIjo_oyfhZ=kiQ%@|.䕲Iҿ+4ނSJM|#Ao:v?z^qkw'+ϽWm,^rZC/~ ~/~%x?BԢONc˶C %q[?U?lY?eq@|+@ac*_okUV:PIKRDofRA5|v^z.>؏xbI$FH $Q7h-M3#?gW?3_h6/Sm. ՔQگ$eڬ G}(_ cڊo>kmQNR~^TOBP+2(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((袊Š(^QEUZ7RzM*PNZ%QEfhQEQEQEQEQEQEQEQEO/W׸i? x^VkDFQ^!-?Yɫ>7hM^__9WGm[RQEyEPEPEPEPEPEPEPEPEPEP'NSu~W';^[DQ_Q@Q@Q@ࢊ+(ɴPǛmk4ɸdn LԢ5< HџuM#%BOX㵀 $Qy`XlgOIv }[/.-)jBQ2p GG%>=T5 MI* tiJ8BctdS,kK}OWf-[[Y9cx}R\|^}{G|$iˎ 2Bn( }*(#"PqƝ%^8Ӿ'V47ec=;wʤ9|+D~+4м7k{N b C3vA+*I] #)GztK BJY"Xbb!JJWW/_z|S(:U1C?Lw~п)xj7s2mYE;Lo#$ ֈ>@@Ğ1SV=?(O-fhȻwt/_FmJ֮VOm`$HCꐣF~b@Q>u=^gĝ'ᎣcMg3u7) aO3_4xKVݦEo ̰إFG1rNǟ;χFi Mj+| {Ob ȇΒT3yOs0glQ'~=#&eE];PؐH8v[wh-d /E|? 
ۣxzi&nuռO y<ֺ?s㯊~><-u${3,pP×}WKs+Ꮟ~&|#|-۵i\to+T;G8wwMe]/E Kk2H,ev'UapH&<| 5t'4:+OYGm3mӖΉR2lFnFh/-/̖u=<>otg>\ml@):2V:>7Wac+^'#[#$mXB6$mrpF;H'ۼgBπ;{ֺk{]q :2t}gE~lK#ƞtZRFQ_b6 پj[֭x3MUFQ-Fx*1/#P SLoC񧃼Oq53VwM7/e=륯/ퟌ5[ao}tF4*m e$Wm*2 ((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEPEPEPEP?Oٿ;|H' j:vP;uh-㑑R996B~o\j%ԓ^^I$<ҶYY䓒I߱^[Rnϛ!;r fbrI<6cO:FWYF烲@Ү{aUbϏ&_VW~'XIx\.J$!9*Pd*QXNN줬QEHŠ((((((¿,VO'|jlsoM⟅D@lMߘ gc/9J%_*7AkiKkdl%_3ȁ#` ]=y_tk]wGD|fHQ SЃ^g ~V7:+8 edI#!حQZeSկc#]QbpNv;?*|>>@sdd=c |oF>i$r15 [ZLz{ LmԈU@Os*jTEVeQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@>}5mc*^٩Za._/[*Re1d{{sn?vgW*)?f=_~?A_o<A t O? /+?/?Ώo;3@gG :>C?G+?_~?A?<A t O? /+?/?Ώo;3@gG :>C?G+?_~?A?<A t O? /+?/?Ώo;3@gJ< 3Gr?8H dw<Gt[[EnPF++Jlt `z¶bVvG:)nŠ(??k>7kSwkS]6=7$Ł@*WU|I]>>񾞷dRc\c|RQ{0WšGDͥ?,wzb]Hq?S,X xlN RfrqS6kޭ{&tYE~÷nD'wWP *.gOU(GE  zE~÷nD'wQC'$B'o&~__D?ۿQ?P *'ɟW;w'Q?vTOu8gJ.gOU(GE  zE~÷nD'wQC'$B'o&~__D?ۿQ?P *'ɟW;w'Q?vTOu8gJ.gOU(GE  zE~÷w73Wfrƨ%*o?3GG+Ir}I|QE|aEPEPEPࢊ++_,ksγkmBMDXLZX%uI&U6ہφ`6?yX8㥮^+0?_g^d۷o)Y|Aվ gj.uJѺ y&*\$lqs^GH.k=[ִuV1jK -m1ypKe$b][P{?u'X!'Ehic]?z@vB%#J"cܜ/5C~#ٽ Uʨ-Ӏp=+/:<GYm6d 0:(PTpk|&Ե^:OV9.[9̾1ʣqr267)4ӽyX;ƾgÚ_84弍|ˉ$pq~<#|?x3o (a-xopHܠ1q[zk[&G嵫- _Nz)s%dBSykODJOYvGnNGZ'/ g@UzO<ڂxD%8$`g|RZoYG,Erƫ" 6Aq>{V]пg76gmɉFE2+ph4FG9gllt@qE{yíJ?jh[h:m~ͧąAW;}k߿hF?]]xEHO+6yHq p2C*>k^.[ź1 <7!FSWAySg 2 _rWswȷOi]|A}kAW.J)ӡڥqZ]mԵbbmuc:5)=7TC@HX;N#ك;2@Ꮓ l{⮭ 8cIJ: v4jllȮĚ7R7UI(Bʅ* ۇjSg~mH!$WxO>+x+[^WelcABT/`Nz!%|Do(*'89gCѯ~)iȸ'%*O`Vbt/{FS0 ͻ.k[/ƾ/]~GBPΓc'kgq_5-k:T!mFY$ p]X,J\rOgא}y#FO)pᶞ28=jk24V,EaEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+((jٶ2<.vsM+ e(Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛQE@Š#=w~sx#W:դ?h b9$ `j$h\Z(7⏇/eb-8jm1D/Ja{Is(*O '^o}.>w%!!d:;/Q蚕2.x0LR@v8*48\ע)QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQES[:(v:ڶdy{A6 m[5QH((+;Tմ^+KhUzM4vDT qs董E|~6h+?zU䏉T&R+_io'#3" WIS>Uo'i'?)?+_io'_~+" WIS>Uo'i'?)?+_io'_~+" WIS>Uo'i'?)?+_io'_~+" WIS>b CBaYfO xyz-7 ֳ.pS;$Z¶I^q{9O䙕EKNoI&GoEW!EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPࢊ+(+`x@W?CZ|HRٖ-o5igXt"3yp>P2rxz(nOW<1RyOM@$۴V\ۿ[s^{2[E]DL)mE%>쏢|"Ծjs×Z)I8AnT)ٸ$}᥵c N);G&eGq,v}kz{ m>5n-]Iƍ9$_J$)YPU*]pMxnxr[B6x/61ȥ]HaQxz?kvFjMwiy")V ŒWh݌źlj;(7P<պ2C(# Uks$ 3<'3ܢ+(((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPGSDu66ː9QsLxO>|emo:F -*z@{7)n5{ DɴR.Us|_[` j#sŮ-%ѕSѥKdWcᧅoH՜G9Ԟ_[MmfGE|sqO﷙˱O%N_~kOWWտmCo4]KsEwKkpN`nXH8`FAJYwԣy>Ho\oFLqє#$ߩۉAk+{2K]j+>*|ve_9~!j`m;y7"G属Ǣo:G|?xMm)]̡`N*x Z*rpOU⩗ኩNJRi{=EyG68񗏵^ԞZ5[O-fH%"WAmE+#ht돈zɪjRkRQ:s/mOϙYu5YF9#Te$կtvWܿW W^~>X|[_ Wmcmv, <ҩSyqte{Mi掺1QqUp3$EGTVFi't貙Z IJ-Qd 5~Ԯ/1eK+$H<#DS18r3dO.TS:rjR~nWTW|0y|>?'!Hݔ:&Tm  2OdtRK[4=6Q_|3 _>;'̬5yF9TEї55y.WxVy*k{fKYc`$lUBPZ|dӺ8ao~&|-YxnmCQߍgML_X†KڢM  o /?^A.8V\n6β(Q^=O Pzy}~[ϺqQ7 sbhOV*~]&a\1YiU1qiJJ4h ď U𭿋'3i3U5x\w AW7^<#?5-;/eJ݌ΊXT݁-O诒<LqƏ 
O[Q*}fh@pb`ΤJWǝko;MQ(h"ɾ6 TC.sH}j`VQ?džieHR\p tOOִ]cI.xRHP} EL;I=QE@Š+ j>2OCڔ}ž>\Ďm3b1֮ܶ[g_>6O%?^xSO0I)'tGg)Eقw (aEPE|> տh ]#Eѡ`D] BCW!~\ww +|iu~JҬcCʱ #2Yc,[I?Wc7DFwv9yzSRo'rZ(aEr7 ~1֣[-*>dUyrI§x_KngwNeXpwN ::(~+k^*9𽽤,7ۦȊW`3fܶ[mQ_(x/5?O5/̫gEZ8Xn{ ݜ})BQ(W~A:k;-vLoRGA L $c<;!7es袊Q@Q@Q@Q@Wj?7|SCxu_OmJ R46́GN}M E~0H2}/.[s߇;!7c诘>-^!{#JiIGVF {ypȓ;Rq9o?jFH~(*9}B)l™>fO`zU3j\hXĞK7PMo'/~ڟ#xzŞ/}"Hnv8T:P~^1W_z%7-nwzVC,Qm\`kOO q|E9{e`^]6Fpvg# irgQ_*#xRxVڂiڽvvBͻuJNUM;0 (~,~^"( ֨j[;-=U+s")u]PF-聳*+䏀Ŀ >$xZtH.<o2(U2dgPn|sc['w<)k0}O6 @J1K9@Tw}WE|9v?_^|<h༕# v *ʚ\O.Xvһ+#:̧{ZqvI/z"o)(I]/Re[BLvNȕ_|9KCkg΁_-Ʋ:LJM>6`VEbI]QKʕDi{1q<2F;]o:C^g? _AimIx# :8 ٯٺ|MxsoͷI'Mw^)ᎡsFͺIE6O,>`@N9=*<[ƿ:xO˴M<3lf`D pF3+xC]Ğӵ {IB-Cdg+IbIJyZ #<%]o[a]Gs$tyF\#& cƐBAu.RnhC\FۖG97mܒw)s=AERQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQES[:(v:ڶdy{A6 m[5QH((7?]X Q>f%;-{:j O v_At 96&8[W/퇠k'|QxoNյӌ60=ě/ݶR *&gp 9%RR>6l3Qƻ}s[  BHB|Io/ړW jT"ly֥F FkoF? 6csV+) b ] Moj/;93M .r#cJtuֶL<j>mJV6|$]|Oaީ{#O y)gr@M+ɯo٫ǟ g? <-e6Ok?'xO7q5>-BĞ$_n&[ cYDt:n_n| our^[ȬH pzEO{]=&ax:;촱VQ2cP=*W?ĚuL!!lg=<;I]8Q޻/ c;~4m{QgEYdg(΍'nzaXՎ"QM綝-c'rFC)K8YsS;R۩'o:ZW牼~24y- vpLPLќc8_uo_xWl1XGq{4sGm26s?u '⎩Xᶩh,m V݊_>}V :3ݷ3f2uiY/gT⤟*ONnn?xW>%x[V{[k\X-;TC 4*ʻT`Z@м1:'M'Nv+K2]m5T]KI'ρ^h|JNjZ--u+9{j"FK r#kn  ֓5m-%)'Z4e+2⼻I{Duo[__ o?%wO\Ϣh~,Ф)82d漏RB5*Ri*PIz?_隝w~ )+8WV2#1_}ϊ SEIYj%,b22W8=ETiVU^%|+ft} SJ}|x5 Y-m6k S,,8c}# #o~1x4$5sM̫a#ԟoUP~𽷄4-G]դycl弒8aWeRp2x+ [Yc066Ѵl6+v#;U᰼/RVڲOޔii6۱EW~PYi<H,]L)_A$9$;iz?޳bT* k ͟NoQ3bMoo<9+ޯ k6W-(f(eRoeɮc炵i4t{oF]7B, Y.vJюΊ(WtV[1W}+Yu}.\vuw5=ƩAiCGVwP=C?k4op7R#8dEgӵڗմPuV-|O|z*&9bӛq +21ۋJk n!e@YE};τ7V~6RI`ۼDQ,+?ho <;ExOX5X|_yb Jʣ 1n|}^iZȅ`$sRY% E<Һnw׌ GtVE"[hc&LL1 -ĺ >Wnu˛aۢŐ: 4ٛni?=Ծ#]sy`c+Y<+R y¯SwWimF24R`y(Ca72;22c>?4=;Ɩ-wm9dXT6V>TCxOO-dO=ÆeKm$d㎧~,'_w[rk~ ohNTWQ-ve ;kM+YP[%W9ã23(FIKi|}~ᖳ +]g_XZA+V\@CUp蟁<ǨY &'Y#aFGC5sԼ?]L\Y}8Շ]c9Ǘ(3<h|gZhjgm߷nVG1|/񎬚MzXÓ['/2.ze)j>Ws=Lss_W;ʿU֔QHǵ|-U/47 \4[],ns~Kj'ÚFuԯs<y$" z0co|AAJpQZ>yw_[կnRVXk iX&{⛯/ a"_ǩIaq r";*vrseMMMU%9CnTa&.F*A jjO i+YQ8g2ΫԢTRy%N}яd7?}VxLJgxDN6 mdmpHei]vNJk㏀ߴ-YBV*˕ ]~%D/9O?d-ngk|53k-˦RMO lK5KZԴt k2IfX,lpJ|,~+|1ŏ|]n[ A5ora&LA#`0ePk|-͟f-qkφ57Ȏ˄1`|ICCw{/Qz1w~^626iZMk_Y<ˢ_J,x b'?/N<+IS0c/XbxOcgl?FX_9LhAeu/-W7pp@=v$Rw"ܥo VJgqIx'ha+[!KL*"ml^}_#> U5?#\ p̐UP"G1|Wˣ|]O^x?`h"1>R[89qh?4B*[0n~5|mgωu&M֣'o Kuh7+]?+;s^,?Þ|U'5M5՗M2dŴXJo.>/4m2VԴȐ(嘅 <keڥ;m:+{y*DPen8 A4Id}\Hp$ş0}c/ͨx2!$"O'_i oMF}5W+"F?$~UC<|F%<%l-8.o3pd*ry 蟳-_D|_sfږ-6n+ɒ ۳#=;Gg-Q:Vzm>ֆٿv{ʠpx*ڿïxWc @aDi&FDU8ROED]3f-_ۺ?Am'a\8؃ o!Eo/t5Mjm[KKv;$w~L F@;|?|Uӧ]iveysT#׷ %8;VslƟk7hRViU w QJ,>Syv2ė-kk{\IFc 2Ua${sKd(ac}߉o:M&Mh䳕$RT`2FQ-=Eov7|y'._.X4@)2"IP r[pn/|xվ(~^hZ+XK}7T2Pʸ G9C߅_ 'ҭuMgU QF'H0h\E(|Z3zNq6-ŝ<]A.8^p2CwW^?->%x;gkac^9ּQciw :Λ\<$nYo `CEvWQO0fooKo|Aq8zsiIK(F}rI*ݑ?~9韴VekkV^Ms h q!r YUWā[X%/QA'𷅴tMgN$+eu]J$>k{>]!oz?|pxݥ]^H icDi%p?|xO[veݺ]ĻZH$, %Hsxp-|SosPUdV唤H uQ-ω|1S[/ukV$gRB)b@Ntʇ[4MwUoqM O+= i2ᔣ Z5k7RxA7\Jv/,]r-䟐Wؾw/\y/i< K-p;923Z_iKςOO C 6;3=50֧| |$e_亞S}33E,[ui$%trz⏏^}n_FuiQ,(`N<~?N-΋>G?M%9 ;\F_/[˟7Z𦔺ŷùPɤ05VXnXt_'[5o[BuO4!K"QRdT\;0VA*W~П|H> _񴖧SխM&$P71msy]?_Pҭ'Oz N$$n* 3-\2G_e ῆb(״gK4u"6"04spjYTdoxk8?>-<]:6kkw*d("I|Xd 謤>qğ7-NJߥa8z 'xSV:c[H!2~@1M$gڲ~|P4ˁKo%Y<3.‐ך~LfOf/]7a\Is(r#_#{{ ~ZFIt\sH+f-/ 3k$|S=Xߟ=gӣfYJ|Q*v[wE&ߵx Su=W\gG{&.&R V [bq+*QB>Oo / hz+iHOwk8c+RI_y[rS!2k՞ėW9J+2˃Zҋp^Dɤϣm? i:G_~K)C=V.Yd *¸|Waᗇ4MR ]XM{J M>"2\F]9~x,Z Q|&qOk"8DrfB̒0^UjYZx:&5-u-#it1lԉ;ݣ[w/;+/5/nn!{EvJ O#dd`.msǞ7S;{VӮ.NQLYpO w#b>+^#$DK6Yq Gu 2jc_c:upZM̵m8D*/PHj]wؽŏωjڍwQ!̡a'}T6T#O35 dr+[\ !2)>*~>[߇4Z~&a5oe}g,@U|?~x:gtL;/otm8_1"Sy;kzx]ޤ`:XM yL2$cbPX7? 
xXFkuƓNE2Nv_r(8ޅ Vzo^]Jm+ZfXa#fs\H: 8ً['t:_>/tؐĖ$N2bpẍqҵh߆m}XP42HF9ϑX/q߲j?|Q/  nXtqqB窱XG#TrNs>)O6E~/ [i ćꖮkO$S\rA寴/k _IX-5c e&+n6p?>أ^6}bU)t]|V]/߳xZ'm4W\Lj76 [$O?EYuGeqpyQvBg+nvρ?Ѷ|7Ꮕ_⟅Lj Gr!`U68!,,Q-+Io%$i_cjv:m!c#;2 ~'ÏTestm$&Ɋ&, ;Km"+Fsuw{)m` 1lcO/=:u&۶C4{_|-vno x k%~T@!rV\j|&м&д4=;E{ݿqpa@lR>79ĚC—?6־uZwi,[H/m47!*y ȯh/?)smk ?s؏?|?Zz}in]ź]BH2G *~u|6|?jA1- @ vd/݉_kϿW}C'xykzZI*ClD$,A#YViʹ\8OW |_#5F;iBHd5oOH |?ehxV_*_jMw4%Ei /rWꗌ#$#lA5dxVo ȤSG<77?i+mK?7RM6VKF;/reRrUhtW FthJXH}a$wͱn[GR #k7Q/6G*ӿO_ermݒ(<pq_|LXmŘ Io #zlYW5{Ά/[DWZh~d}$vSzje PѳWww;2|Uo{d1i+>E ,凟~? ~ /~垳_|7-k{Kտb/$/iZݔ&+[i^ @ =ˁl)yqt _3ʼn<1xFɤR֮MOKPF|Bϛ~ЮO<ꚷn ۙ8^YU}t8qcӾ$fkta3^SX' o Y۹+ x$~!--~Eu-丷IXZ63-n&u/m>+v&/ n}+iyA<%YCf˥Cj,)pj>ߵq|^Ņ_  w B0~68|`XF"zo'ɦ\^ lf؉doPI˫eevJݱ' ;kߋ5-+zNrc9(cB"c͸i'~Ҿ>㯍kSxQ{u#mUq#sF؀=[7?ߵsxE4x+%qi2fB !wŵwߐX~<_"C8|Exd͈2n_rd]CkmZ3ׁ?f_x3:XvFۓ$H_jg}9VKo7sK%7ιWܿ4|GKg|9KCkkɾ sb0׬׋6{Yo/Gɟ+^ۚξLZuێ~Ҏ,1?QExQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEࢊ+(+`x@W?CZ|HRٛQE@Š(((((((((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEx&^I Ei`oЁɯRs*][Gi{)-Jǫ)cci>9icUџ|¯YԷyF[]7mg*][3hE2Ax2WWAKM?QTsQ@Q@Q@Q@Q@Q@vMuZ}Nz,1 {_~x[.|Hcԫ`ܸT&}_XVҤRV=s,Ҫ~z͙7|Mu]fdy1oFQ_wN~!4xQԓ՘,zy5_ |6^sgu9oYeI-\BF͈;G#  YZcA+F@O\TQR0a # r.Oz㗄~2xxO:lڽݒG] ZχҾ/IN^aZiV8i$fr:p8Ҋ7+kp*FDJ:V )PqE("4^>TQ@#*{E-Q@Q@Q@PҴIQhNhBKPEPQSbDn j@T`0W`k]B+[G2,~ECoooi [ĐGUJ@`UAKECB h8H*pLFx4Q@CƑ&Iڊd8-P_`w;hC+TD4KzO!vmQr1Wv_]4ӽ~?0GMSlMZTޙ3 V+ V+57\|o'_o $9m37fu)ukѽ%0O%1R#'֊+sLWu2mWm%]>gyN/aM+(8((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛVE,ΝFsO=nNORՁ3Wf65!4it>d}x+S{?4CVͽ:mY/bX(`˵`8$0=@oxZ"VW|2筡o"hC4*\>&|5k am Fs)=_jϦ_h>$:u+MV9e̢m9>(>v=<[|~F8C)ܽ/h~?x)gOj6Ϩ܉#I(74 ?!Rd})_χ>&h UżB2$mLMU[z|/Z?O_ M-4Y.5Y'ib Ux RJ'Z~'xAҵ+<@syo5 F ETrÓ3mٔWɾGN^ hZ]_ɔF0wlXee '3|J~%\)Y^ 5}KSkv`E wI R{'{2>Ư |F?x{J(໼K-՜Goj|E>`pvJPqi(Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛQE@O^;cZޭ2gx*tEi]o|[uM|XUM=&=KR RMi_m2":1_n#>q '{Ζ| Uf3܅gtg_P cfI<\XWqګ8C5 :Ǎh}&ɶ^ m1$PE9ɭ?ӾI^IִžuVB Ҙ8?Os>𖯮k%SΖOL60dn(/g?uQoq[4F"݁u"ZxHKqitthz#~։3~;fdhQHM2F9y9ev_|Q'Tߋt4v*53Km#'@, `U:mE??_AKքַ=2$V; 0~o-Rk{U ˀO| &4/LLg2gy$#I8(|%Zb\kθrI|J% *(ϛ>!|2E\k@,@=1WҚ<%Lhߝ*Qʮn0Cr)_*ª2OV/<)'«_-Y$a{E0E$>~+f Mcuk/>kbYMss1G TAIy (f.K*U5md(Ġ((((((((((Ju5Pi>c^t?lk= m[5Aڶk (Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@ࢊ+(+`x@W?CZ|HRٛQE@Š*}c-3nw0^>һ_)QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQES[:(v:ڶdy{A6 
m[5QH((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛQE@°O*j*I'=gϧEϧE~}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya3w?"Qw?"WME}rGȲTȲUQGvs?xs}?,Uxs}?,UtQg݇${!O'G!O']5{Ya]>.-c T97cI-(0(((ࢊ+(+`x@W?CZ|HRٛQE@°еo_˶ku*kM/°vڅ˧jVO-U(F\ TTFo>񗎼%Gm{Z:]a!c._:Syl5Ag3K&XCQT8@#fRMj7+-jqgߵ^|y|G{}a>SSW FS)K3NoUgx cH akӹSlj[߉_]oxj)w.a}Q#J/rw@{7i~'֑4kۙV: KI#q .r+j/"kúK&wA<8¼>m#m<%oqv:mΑh,巅"hBQJBA d -6l>.%Ҭ =R9 ΀օsQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛQE@°4K]4Ҵm")6-FP (ؠU=F2|]+ ^N/6q6h-a_Sy%2wo4=~#֏4:mVYʠP袅VHN) >83eŚi~^Ar2LcQ!3׿^?~>]bO״[CbbUNAXeՁ^ +*)*7W|k⟅^+p+ʫ4 c'D]>7௾:_Ᾱ[σ==6+޻;a} 9$tk裝>ʊ:]:e(BQBgqWaEPEPEPEPEPEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( 
(?ࢊ+(+`x@W?CZ|HRٛQE@°=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I4?o:GW+>=;sCVzj_u³ð{I7,/绎)dܭWI\~]qbb;pn:Q\@QEQEQEࢊ+(+`x@W?CZ|HRٛQE@° Cc94_gL|2Dqe9r2w %x#ӽxD{Kץ8b-yL)f0"ˎ K6'|F|_Zn:g,L#- Hgڠy9Was+|kE| c^t?lk= m[5Aڶk (Q@Q@~YV}zyU>6QEYEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP]q_Mv/QEs!EPEPEPࢊ+(+`x@W?CZ|HRٛQE@°}E5 -)/o5_|-{࿈4lm-mQ9Al/|O kqVVe.E}id#Z~[r<z>EV$IJ\C2 $,Ŋs: |-|4ީ^<{yIc  @VlN3ɯV(~^1ijfV;0eTTP$>7ZGoV,[F3"Σ/Wzܶ̓Ijy%K1\g*9r k ZW>0(D6,ƪ }9a/;X5Mk^7wh.&!\ ' ˷z^ Ho,*KIdEI QY6X 8 /?~JO jӵ1W%M@6첅W'i3\⻿x~2G[NGy0*͸* \+|9}sWX0pn(Xef-,SPQeP3_d{wItrA%6$3K#Y dn+IR h>~zVsk?TYVԾ&HH;G .C2mW?~&% [%dŝQB'&FPw=2( ( ( ( ktS[V5C{V̟v:ڀ:"ճXD? f{ )QEQE/AZEg׫OGSaEUQ\ͿumKn {bvH]ötNnMR 7euQEAaEPEPEPEPEPEx/?ٺs%>f3*$~Bq:֔hԫ5N\I]DgZ:PujF+vݒgkEyO5|;xQd "G\FV=1p\t<hΙ-_ѧ6WV( I 2p@#浩N85ڻN[TgOBuM9$gJ(({gß |B>_Մ_gDiجH^B0L#;ƌ\M+~U JSi+j^z(7 ( ( ( (K>8ß ^xŗ&M :H` ff`N$iJS'KvٝZRV]N4|8rMWx`Z$Fp )gh^4,Q᫟i~m[Ž+*ֵpXQsM+m;s-~TzP4JI&>QUom+F{Xi_c 85Γnm%vZ?|OI<jYZIe'2]9yZWR*q5fL΅zudN4WkxogÍkU?K+??;5#h-1gզ*iI]]ZKJi7gg{>ϳ (6 ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (44+/k Ww~+ ( ( (?ࢊ+(+`x@W?CZ|HRٛQE@°Z< (40UPK187 ¾)]4<^2"Fw|O| k֮u:լPʉ߻c#ZK0>g)J0KOYz/ 1y#mYvw~w[|F{xدMleCY ~|xxºFyg?n$J|"lv8d`x׾Ktdy=eQ蒀Z5#M?9s^V?ᥘ¢֗+=\kbΛү2b/ǟ|'7|1i׷_&Ko/dG̎,S |Knb-Aw>v1f66FmG?9-߄y;cU(o^Z[˫:wU;M饼y,xB+K :jQiDct7RAd`?i xj~tZuVDIIYV4$2K#~s]՝|)cX FddXh 0G˻%Azunx5E77̢K3 /C&.k?w eZ2:t9+/.x:PVTZVoy.l{wm^#<9m/S Cjw  S`s^w hg[+T\*ṙ@_~ ,ZZ)-ѡ ".yi3/jZɱ|/_lzd|[BڝjYv['ݭfsG:RFNwd=KĿ_íѴM[ZƷ74Gu9c;NvAH?O|5O~'9mʹ!YAAH ??w*]8&΀0]ً3u,sֿ:gKR0oiQ5+cy)qJ Ӝ#w+)[egCЖm1㍜gnWd&>:/S0;,4gsB: }>0τ<;ky/V mi$3 _ptVR GA5b:GN2QP .Oz/ۛkc7uQ r.%I>2JF]Noq]Uk={kaZp\? 
ﮛz=5G{⯄?<; I+xOon<7724lU1\W5OyXۺ~ƾ"dڵ9[/.S8*();ԦI/>C鏌_ ^hzwz|tY&TrBg-|-U|jqcp!՜. Oǟ_4?i ( Bmȏm t5GпhφOj ;+Y 0e)xe2yda20Y8^N3\{7_6cOURPRt-ײGx?#[^x@ ;|̈K0E⽞5%+t; s2iŗ?1d11THٸ۟01cJt5j-'މw}ϱV#NI薭]wZi*5 ~A{w5F$!gU\;|37zu[MN#i7V?j?oOO|6o ijI5ǟ(y@mFPr1g;^(< ܻg\9ȗ|}̸o-u0EU *iJR^z4ˉ3%O Stfo'䮚G_ 5O[O&[-3Nk {PYTI _9k?mH~W;ƩFuM쨋`c ϟ+E!s- YcpjY|JKrZk&m`97EW޶y.&|$~ }_P 8_yQiW/Cܛ.:V\A' u?>.,'ϸ l֒ef(с,pHl']45bcIWgԅNcX`劥KʜZ+-Tի#Rwrf̟w1 'QԵ}K_tV=X[*Kp>7Z|מw?%xY,Es6nl3*)#,z +䟎{ ?MһZm5˭,NqNX b9dyx%q\jYԡ_6'ދW{/驜˖6t>9vVZ$ݶTW|۰n#k*eI$}(t! AAп<$Kk伎i`pA+ċs,e$(8cYk٣iX$qƀ33TI'W96_jo $t.-4mfݍLjn  éX!I,ቤb-8onӚeRxjsTd5g^KoaJSݬHEݫV=>9==a^)en<ic;mO@P8P9Ėr yl S,\yXeӖm,^Y:~KRnmmַQEyQEhi A5W_/a\EW1QEQEQEࢊ+(+`x@W?CZ|HRٛQE@°>|Qx{D>]JVP7 Xevrkkh,A0"(ª @b9_F^|G,Km;ĨS3E,O4/>w2¿H(bIr*m4ڔV5cǫI)rNqnni'g;^0%PC>#xs=żP6 Ͱ~Q9 N?|GZuMRx䌣!_;H݀prE}QEM^-J#7IYYj.Kpq9JJT+h||{_?h}"N :{i!{(y̍6Snpy>W>{w[YthgrFI 0dI$QSGjӕUiun/4ttU~R4SM).v~_4/cNGt)XuFU ?wkSˡ 2I$Znwqa% Qv~uJ\S"8M8+d̨8z'̥9=ܝwuc#^UF䵮d${B,Bovwf?t(v?,~)Li5$DA2bx"3)XOkTTy9-r+W_;<4򔜜2v~GuM'L;a1v8XFɌUq>)!xgDRtvڗgeGVnRI$}ERTkBt0QZ}nK ,ΝjܽK+zz_>|>E\E-a41y1 I-Ԉ$f2I?z߃O.3օ:J"6_f9zMzo1+تTҗ4RM+~󵐗៶u*RI6v?>|DxoIfc}6y_d y$msj <+}XNoy- ʑ$ |sq}Ea]I15[]mol%xՌ/B0vkhm7£hvm:t6,AH$UA_|ibҡ,5G<{!l;$[d98PB5OؗGno^4ּl ZH,2m;w=MzOw |cxUuL^hčø$33`*W:~{5՗,G_Q[j: #^=tG}\EyYNm[/I8%t=3Ͳ9aYfvi_ow–.]ֺD;#I$.|R9IÞ7^,O ;oEU ()## R#OF <7~rLC)FNM;^inW2Tō/\chsuoDCpVIP6$vl|C q'w(}`._'׊_[O__ .=73*?j_FtxwG47.yVȧC4+ui? DGX!n~1HB:20C}EyxJtgǒֺ^J,z5_Vjsr{8Z---n?<( +٬ZslJ0S_yxoH@O}scm^9@idbI,$ߊۢ3;m>x6:[kIus{ %Ɇx6q_ >N%jz}Y:Mn#eidr%Wh2K בi1hĩk R\M哀#V7qWKO N=F7گ>3Uԑ#kcq/xUeKoب,DE.ݠFUG>!|x]ӤnnSkF#ƢT^cSmөID3ow5B&c<2VJ5_ƿ^x[O'٬u;KʊxtU%q~ ƥxzoy(ee0Ą|,_#³M̐F2BJz}n mo_|1PԾn|3gy4Xܤ0t݈ʡsg"ӿjD$#in!f'2ixkL^(d| ;1$ɥ((wg]~?mtusA$W1Ɩ&bmLf p0 ܅$GѼ;a{%r<>U <1Tq^;7 uzMgYYO٢ K;9eOxVno#zmiHp BB BNZ\xG&irk ԭʛ I8xUuHMyi~֥47{PE&Tʤ;?dO!H\Q|k0 |T# ?-5?aӮHRD`6Gnkۿe/I>i76w7ɨI<))h%G(߻$*q&>𿊼=M~QӮ1OpppC+x*0<^I_oৄټ9x~k![Oq.2)e<0RH<}࿃?-|;$ Rģ+EE`sk?f ঍z]ƻ ;4[(]w dGN1`^6>QӮtW6TBV@5⺇W'LE"Hbhm&WR2?\yŏ_~x|n# nIh ϻr|ۣ#h4,-:xm.I&++ A|(w^e/ya>=?آȴ#q4zW5O@*FQEQEQEQEQEQENJ'ݬk·>c^t?tD? f= ?V`4AER((?_+>5OϯV*Š( (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((/k?K_ ¸1_߅(c(((ࢊ+(+`x@W?CZ|HRٛQE@Š((f>"?{ V!M19hۻV[+O<ռmYӬ|Bh4[y7%j?2r@%HQ|,sGK5,xڍ?gҴ%M* xդ~c,Iv uVTx/?w ky,2?x~9#ޏ_ |AKMݟ,G-)ȁX\rea@TR|B`Ҿ;=&XPEF%{3E&I=:WQIɵf< rWn,nbI,q M(s4? |A^橭\Y5u;1j;G +Ȗ_O+ł *>Ei>#*Qqæy{y!b\3^_:ß:m#&՝c$J6DcTs)JI#oZxK_U< uheX`,= y~w\Qo 4؍a]lDqHQ-c_lOڻ9}Ӵ/oKY,ςQlVarXQ/ڳA7koɐLF Q_PG.F0h:}-@_M$ 3F@Uϫu&*((@QEQEQEQEQEQENJ'ݬk·>c^t?tD? f= ?V`4AER((?_+>5OϯV*Š( (~,sW/gNv5  Gajo h ;jl{=մ (Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@W?WƏ| qg Yk6sG~ئ,웖F26=s!wU7aj~Ok<ɗyQavₜl{}QA!EPEPEPEPEPEPEPEPE]oKS\ kKCᑭ-I>n Ky_jմ *V3.nc,m p3Cso4\jm݁IX8̆8ܒ+`8-zKjQg56y Ŭ}"[ȭ̐vɌr+;ѵ[= S׵xt-5n3ʗ*c:+h>M'6V xEԮdk.,Nc٘;$LP&QE ( (&G_ xKojp޾HbFEVf\Ȭ8ڍP4Yj3>Ö7gľ&nڒ?o8;?.H c_JEP (G|3/4fa]p[ )# 9.O#V쯦H01c dKMQ@+ڟ|'ǞE$$E$M>lP4}EAk+Om2"XfAEPEPEPEPE(O |MN|]I HȶkQ 4QE ( ( ( ( ( ( +ڟ|'ǞE$$E$M>lWL"+:eh=Q@(+|-<^6b{ H- м3pTw3Y>gK]}[f+y<1Y.#cT\ [ +\QUK8$ah6I6@9㚳@MKڟU%,|2w*}4.o3)F@ZՂ(AEw¯C^; ^42 [RkgooAY^js1#4 &ϴh$[? ]Z3$tg`0QEQEQ^K7>x=D0O{w7\h^HPIހH+̾ R:a\a+` I eM (OǺׅ| ?XG:>} L PJt":+g:~{[MCXK%uLS@# `䟥{UjE|)h'3x6v6WV-=dH|&RDJێ՝QE(.s/] t72XF;s$k9m.c,5wpvg۩b(AEPEPEPEPEPEPEPEPE_5>}}|mu$VǛT /8AshK? 
>"T-1XŨiA.Z dq5tI7.܁k.c,5wpvgb(AEPE5|o5 X9I֣!2H%|۰ ;E|k㗊>"u+ t{%VfU+D2S` e~ lxD-,6Hǚ-QA!EPEPE|Ӭg&S_!mTRܲIŸY.T*GO'Z`(QEQEQEQEQEy7>2S^>wÝOZD,30Nw*4ǬE(((H-<$%Ff8ǽchO5|e ?^YvN<*/RoFWi#ۨAEP 7ǿCM_Xھ-J4VQ|K1B gTW~?ߌO$57YaWH[]K<2rǜW Y(U-KQ4[R`K_(U[bBV1h@"\]DEŒ:n_恤sj(0mOŸxF N4.~H+ξxQ¾7 wI2’Ovx]w5 tRmZ[;I_%KE09 z+_n/[iz.%+m2=Yx`v ,ᙖ_inaxS* ݟlwpiPKˍ|+08-ԃq{Ey<9icv|KkM#v󃹣 p[p4 +#Z+|f'h,m,e|]iqyq2HnkXt2Ȩ1F8}+@5`(QUn翺mDHމ,87q?4=ᔓ]$IJЂyD¾cQs5hQE((((/k?K_ ¸1_߅(c(((ࢊ+(+`x@W?CZ|HRٛQE@Š(((((((((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEqEgֆYTQEdK.ᶭklmwctWq ÿ0܅<'Kk"Mc|4GߘeX6l 'ȍ+_~v뾙4іhxQSu ^"c!-#iRH(bO|SOM2x?- }D7'fYP;A۽ )2/ 2Y`'ඐbou-}i֫h֖$ iBF}BFFH1MQI/~*xCcY?5[kYt,(mlV-C(bOܑ_sN?Ÿ~ t6Y4[DLYTF̤`ZN7\-3_'\RSVLY cv >Qc%Uu~/I~ )ߎ n-'N{KKZ,RKvM4mrrCyO!? |++d,N,v[rrHn{>|^sJ}:OA7Ӟ v7ּ_]'\5ŭͺ;nGb@!2RUZHNv*fr+L{OxozeͬMcu)BmȳYYd&φ7W ڌWpt_O-C(x3O'ԩ}בG7xGOx \Dԯbb dQBa 1a{:ğ٣x R.wb۔Ee)+HncK1U?}DZ^g+{iUy̬\тbE#ķ?-x+MMrfy}6$(s^83Ufm6?g?:ςn>6x'>ZΫ֧u%<$ͰGn2{ן~$j~1,jG񖊷2LI^+<#o8<iDYthotfC]_ġ20L&Rc',٤?O5 Sa|t+fRDaBVY+T(l3@=aKYh0jzuma$HFH‰ݸbC`+_B~ij@j&18eYC/P {X+o_'־Kg׺}B+0ytչo=T'W N:oxFេuCL_|Gm&|C SŇ+7pNDjT `ث=U4kE~s\!9׌f[Sj,4=֜>1؃!P1W_$';J L7xh4KT{~3{<'/K"۝E4K-hg (FP< ?kIj65ͭd*8Rb>tjⅼaZ0ƨW`"_д2 9^,>;}C4j~%Innd?P($$5>-^7Կcm/":Vçj.bI= 4!#',jwkŸ KAxl/<[d}JziqckTIX[ ^xW;]x-|xFOI4&ȂI&}D:@\Fq>,Ϳcѧ4Zڅpv+leɑ{;<8 yf }J|A/>jmO6Sv:<VV"CQPɟ' o\QǺ玭E&xZ)훙dFq8%N#2nlU13p !CFCf.mX"f ƥuΧi46:mJUѲ9م fQLI;+Cx?o oֿ [_Wk-^ډ)Db őn/CF/,71TWMX݄[i B[zWu Ohbʎil!rFqb D*ם GS ~x0xkfmGB^4ΉU۫30i+C/B~$؛eok#ֵjiio6$JFLB~$l\?i?<>]x?WiOukæ_Hf{fyqp٣<;/K;wCJᏟui\Fڳ+9gÀIa}jY5yw_O!~v[kҶ7mK8#$QIwf?i/ >8O Ϯq6h2[+FǸd#8eO|\ߊ"֮;plȘCl[˅# #U1⹿~6~a^+C-U`kKyei4[fb;WJg?z=6"l͎%!12F7)=H7?EI_úu Io_*6M~n+ygЮ5x>-?VMEokC"2qq1/k.-oMӝw N;5~m| Ow:6[onV<8 2AʆD6sft4>*jg_|yac=7W-\,$|K6oh߇7ڏ)KдV4E'.egg0ȱʆF?gG?o=*}JMzNkwĶP f+J$3 6?o_1.ނ]!X~7i#xr#`1笀%qoC>ޣ_}P)-V[ۏ i\I3F#ĒkxwOxzBVK0P( p\ʸR@t$WKUΝ7 ;%[THșs 0+(si+NrmݦÍ8=yKYe!9,FO<0HRǀFI?A^OH??!R YW|7R)$pU 39;o>)~ؓ> _QGKp?x](EV@ͬxg> J[KmYG-폛o--BIZOi|g^h\[Z\aM8,7+F\d kOͬxT;MO2i~]c8e2J{,0̍ױ_[/'>+.KK]Z2VI#{H'ʠ {Wy:K|~9ihپ[<-e@HTrPYB*25(i*Ey_Y7~|o]RmO@֭dE lnû+2o_ ~/o}WB׉iU).3r 쭿(k']>kqOZ\NaFyVYe`K-ث~ɭ?ΣJå]i^ th2_/=ENF%@, |,;wxE,qj<6;[G_pOnPwzo|Aߎ_msW=~f&K k>\H7&t gTFt) nC>;x_%joeS,g&kշ@!INfi/mN7+ +wKVr$7Zϋ5h-RDb3<;w $ Hm8i<*~'EDž&^Z&VX7?g/rŷ ܀@b7`xo~ʿ|hO~;=shs&T|#\$|R6፮jke+gv9p;J!>||]V}udFmK0 4qOvw3Jdfޭ ~j^9?f{;V4"1Ē(_[1F]P%'7MК-#yoD闂3%# O,m#2sv~M? _Fqe[MGP[KgTxv X]0@TïW_5WiV> ˚@yyQ7,̏RBg+/~;|9d>كg"nJQg= (&o :tlfA5%`R>Y/_| },<; |=i]ZL&w)=MN嗌w/SI[f:g_vs`8R+ݼMNdh+Aӭ=JJG,5/\)>mݽ)Nr!mI?'#Nk")\>^>]KZOc\F/(x^5$xZ5}\ӼCti夌1\de_F)Ubw Ü蛊}R\>*ox狭vU׼O5Z<6 o 쑄D2 C/|b?fxO>&xW\buQ֢-~~Gث%®*1pvߵ|!_<*ƿ>|=ߤ|aFj67*TY#3d* n~/<ς~2ԯ>LѣfJ0:iWGAWvG~τ u寇AnUCo[1wǰܨM293|-)/ώu&{g4K^V$X[č c߳?ZV̐!C<3'fHxzYZoe7[3uڱ'"QYz,q^_> ;˫m7^6Ӵ.SEMrnn1~vUV[G-gŸ SS&_ԧk{m:;H[)@\dq^D|-|e I<cj3.}Imj:IBF 5[.c%WE41ji"Liso4L<#bG'>%ܞíSuX픺B3YHY^]Fby\ŹR@ #QKhQRQp6dqH%{MK o>e!w4[y9,T!p|W>'x3[ɭ iV'eM9˪ejox>2kJ[K[8k}n&7)5B*U۸|m|7E |ӵMo~0{{i+ U$vGȪ ;W#Nյ(|Km/TҞCao6y^1 f [S7 xXѯMKVI $d,ld 1|:~lN1I¤ 1N߱|u/7Ő#Mx[tu>+[OşR+guO k涸i$.d`qU؃ |vIF"Ț! $R[o*˴7APm W_kMy@_օ] ?[Flu릻4Ch$άT: n_|r4AӬSScs)U㵂Wx%]'"=~?Vg қ*;E>:Kx߉Pn{ GL0܅arf5)h8{#Wiz=wO4w*H * xl+5_ hrn\JdQjqs$#@ ?n/n`\qkf#+ w4."Ta*1c!)P6#L1ѿ+xC#Miܵܰ\CFcNmȪXnV??.k(;bY<9۴g9V >#ӫח^si3|7@ Y($o@7>o>|5&6u No>n~y ns׈~^MMg~afw&/my9_--$8X$h!/}m3^xIf(hYFt]CXWxHQ˒mfV vvW_) IG#Y56)#.w*TG$|9>+7yV6 0*²j X ajڧo-_Pkk{be'' Bf V~_[li%V"t)$erxI;_c/_ |I-[a<7,ʪ.m&vZ,Zg[ QvOREw:[\e|ؖV8~~'Y|a[fmJI#p؀fpn~~ |+#͡CG%JːMdt4_Nc??Cό>5x limYy# ;@~(◊1xJoV:nS͂&06ޱ r0g^'謋_^cV;BDžd7fJ}sx? 
|J5x^"H#AlpKic0L[jsտf>)%g¿<sug=hV l+ !ݳ _|IU5/moqt--|E);fxd( HO_K,xMċ5t};)%Y$@,$P|e%GxK_z^w?(><驈vu,pE)i2v?O4خ㎩7)_Kz[{U00k_6^Ҭ׺.DZ ǀuL7HmF3_uahR g i`)1A ++=y{Ļغ6~,V[Lc@/Y6Q8`U է+3\54&]vM:ѡH"Dp)*RrY95S> x.*Cc(*(,YQo#v~^>#?> 閭`|Goq^KZCX)W;F;>!=7z46إ=\F  9"gY?! _ <1&G$ɴRVo?cmif.FrCs,{* hNY|\>xTJB^x D֨M#,<:.־>azXN4;$HTZ͌2+9ڥ_=CAխ>(M[giqb6\̙GÀ`(~ܡrd[pS| WGq/L#]r(oH]61?b3U=+J2-VE ndAy죰-+/~}G~61m۫=M]FhdSweMj$N592ه.m/*g~ojo&>5YUbt[cE pd}r Ch|AOc^21$l=}FbPUHEb/t)-,[+KLoʪq*Ho q'wia;*_T G VhvlFX A~,W~ſ |O-uY]sKIA&eطPa?|gw<48I~<ʒ.Wc9RIl~|._~oDZDۺmaTH`d#\g+_⇎|'GOmXi6劵*,! /QK7`iG&`xYEK +lf q|hSŰ|o^M`vPc+`** !\J2=|TIgľ(/xeA3]l,P<+78Scl(/)_h"Vs%[/+m8m|mkψ#K5]_SgmHKhaJӑ q.D8V~29]4=#U}/?"_WKK%c=0D RBpv? |gBgx.i#/jG*,y"#B,l>XhV: 5f1kWV4][Gʱ9h? XZ$gV5[+}J9Osm \:oud]@b OZO_\l|qE =jc$nd,*=/~.HkX4{-F%x6h-ld=q΍SsGxO1~>#| hʥHYT6 Ǟ' K Ml^Z.Աhԟ5gn*`e03 LVsQEeIeB76HS_|Ti?g}io-rJ-F7pnBdRE⏁?I3EM:W9Ѵ!(xl $}1b}j |@hK@i%ڦK2mq6di6rH >aud-@7?*\.Z `܀7W|k~<}~*u6 WTP.!h6 7;_[nb1<?l̤@~ ~/oVFi9wIķ9iZInl\$x_> x׃0xXmVmu[&ӯe.3~c*Nkg|c %g- 4mej-$ Vrm|Pi:QZae.m ojרa@J3mّ*2@5~?OxO{kfM6h IOʹgg8~5GDּKF*5u~zŶe5ŝۡS rAb6A'hdI.e??K-+D{alyPּk; xR/ M7D,lL)o<xHܤW \%\̤gMQ>T7O>2<}k~*ڥ =C"KU.0W'Ifi|i3喇ˋ6Fn+ -N+~ _-wIl%>E ʿa v>|k㟌ᇆl/O4 j.HUau:$enSJo?⯈Oo|`MOD$7+w%ՋG$$h]_ze _fs_ j/{/&탢"!KaAû+bռ/xş ZxGÖixTw;!h(K8J}>/-|j~xXKՎL6qR,Z6Xp>(>-_Bb,'i|Clo;T3-E#rJ+d;~nG4Ȩ{|o<iv"KoIo#yʰ*٥ 0W_ |OF?^sB 3Qأ8y-!Fw(%$&Ӂ7_#guoxWmA)K!LY m `ů~?|-ρ|!|V,tx.!ԋ.uVTq {_\{ǫzviOsx[ 3>;!Bq^ͫ?ψm-6Ck,zn +x[᧋ |Vm͞dצ`VKxTU(A?_As"[p\kH$F{Yy$ c#wZc| .+/×ĩ-e5@KlyVa gOŭnk_OƛC2ܽ֟("1w 7I__A6xt ×r[kʫaRBrEY?8X\xok I,t#. H&M?^[ ivWϠx"buL,FYV1vZgUy 7ɖTA>mͱo)nvʹ\bI>{ei]!%cæ]\Y#9*Wa-Η??nk]kTaˎ_4ڵƚлtS&:' IQ~ ~3GÏg_Y]Rj>$ۢ@a$.kJ?AakKnd5}ivIo%{ifG;,_BV6y_6@¥؁%u8ޱO -|1}>٠c¶2_ NVm={H_éZOi!F~ L{ <kԾ2x{hx#;s̑P8lovV9Eڣ75bӷA}p~Үۙ[w-.˘`]V>m{^[_Ǐu xrOiiV bյ iA\6%lm\O*סվ3~rh>=fHcY$FGrrCԌe?|jK:+|MYj-ڹy-RynqxW>xVߊ-m>iusw0&( FxVT=oGo_z k|# Ǟ:}5ѭ48㸻>`{*dv,>>U?_wx{ǟ k=㟆}s#Klrd, K4{IR-?l|%1|_+U4L1ZtPɐ#`rVZ־okh)WZ f[ح[␷Ģ$&Ir GV A4Emx;*"4=8`-|-Ck|iruKם6 O_>?4 nj\A_Hwa[7~*%V=ު×1YP[$=Ch7Rj5ţZ\F!D=AMi>6|N-O|cFZ&emcu,Zm]/ M :`+P"%pO5W2~9mŊ[s}l8ǖ#H6+6?#Z߅b'4qesP[xYş85 _^̷W XKɍ6R{n\|[nmºWfT9%[B[u+sꎌA'#Γ~ 4>,|G⏇>+Q7K?ǤpʖHyV˴8f_jCč▇e O\>+ WRA3ۤ7 U)(ௗ#/,OZQ.Q 2O"U})jgRS+x|00Km>mX2I&Oq$mkҿh,GnO: mIe)hpNܨ$UxĿ ]h>d,XƌؖU>{M㿊~%񕹊_E7!%r87 =E{?ïH_P6uc~_0ҟ8j2o'FaKGq??ҟ8j2o'FaKJKfzWc? u{CȌKH#}%bWȞuzO~~Z厷e%\,r1ݿ%Q|iZͩ@ibYe i6XшPH5ٯeigA^i"2i1ćl\KmY࿊>xCKOt%s${Cj45B]^HBxӲ8hU8 A^˔Ϗ^[ ᇅQz/ :aʢ% 鸜p(WZw_=Mt]cJ$X`cvny sKM3~V}˸NINMha`P>p?8~'[ZtjvW,̏'%!rP1)"!7]|CggI$t9)rTB(3Wox*گm>s%Ͳ1WzS|3OxxShv 1dk(2؃*@[Y0y׃.KOdm\6q ݴǖ 3re㛯?'|g&y> ݧ[W-erE4dPQBuo#о5|zO7_/,kd{Ky]ϑoq1eL.J5? gogMs^Ҵv7w,p2Awp>#ŧω ~ xѼ'HK[,cpOzWGOjzU։&cz.ba2˓2qĝ'.Etki 6GU&mpVRv:iO?)30yH_soRT6U$,s?~|_x?.=|d5es,gڮXIu#0ˏU;JL;L ՗qWQCX:|?ᝍGíEu[Y $R$rDAT9 ş$W_d͵.e&E;\G ̒O4 f |N.?itxLqyImC(U"@pwlC!At<)gbpϣ|@?h<[rX]YkMvE,v hsC}B=Rm>&`k!"b00vh~^]ٚPȾ܏S+9۟ n1TuOuo~>φK-j0S|b^Y$jO^; jAKvW]o&onG۲A#+B7(`k};{k6zt{ RQ Z 9d+ne'pt?ZįO x> 1ki24Ws7RrWCPOۃvfK[xT& RK;S+׼Ew#- 6ĴoKb(9dp˸ _\u9ZNNͮݬ\1_sKeUc|Ss_CյocBxIеLiv(d;w,B?ڋXfÿ<>^I5PM0\V˹Ws0P7,vom[ு1|JCԼOwO6=r6, *U+NNɿ7?t=mGD-C0̍UvU]hfh|AdO@j߉Wz?σ4IlVmO4 /Vf^!'?f ?fy:4sƥk>uM巵.~KM /'o o:Tg5 -T݂[L]홪k{7WUA<jtOVI7?(I.qge2{-w~~?_Kbzƥq%ȸ&fO/|0е-BƳaP` ,/c\1JfN~~8A>K/Hp?+żMM7YoZ+*huSd\JxI#$zbҝ šGU`O_O߆+CPY),1`τG¯k _iV2_ٳ^p$6?-؂dYW!>$=k_|Yx4Fjd:o7QoA_~?{ைG߉ UfJn`an$NI*\x~+\EkH2n$ Cr@$!h$fW b4?h?? x_Uh`Jӣa$<`Z%In ⚵ğ|G(o?IM;Gд ̪٘rPI !>59uxEoŖ:l=֑"ZbSzq/9\UoأnkSyx.F K4VHnY6X! 
ܾPÝYOh@;,&2M, w7aC|~i_~.FiLvS=EF$e]rTGC[~}moβ<>hcaKvm\]2̬僓/o|s_~xzkȼ=᫯ۡWjU; vޏZ6{R񖹦xaR^I4K|uЯ'?([?cމ4ag6wn%[YBIny?;)lA}do~|+mq s}UW\ @.xXikwO/5P+.&>5Qɣ{cq6ާu>0xU[E<6]iZ4eff2I]>?~GHlz>/_ ?S*ygΈ5B>WY7REks u"_L1 7X{t{C&Si^ΩKHFq3B輈M⯈cP}]V֗2myݍ2*J"H;_pW_7¿~~2ǨP-3?eDžnn=][LD6FҸ*=/YO'j}sJDҼSoZq->BRr H*K{x''UoiZZc{3_]ϲ)$y$X )ohb*dOSZ3 &ucjd~(((/k?K_ ¸1_߅(c(((ࢊ+(+`x@W?CZ|HRٛU8 Cs dV'TmW<_ݢ['/zQ}&kf 1@;A݀N^P֌i(@QEQEQEQEQEQEQEQENJ'ݬk·>c^t?tD? f= ?V`4AER((?_+>5OϯV*Š( (*m x -Bu55TqC#4  9%QE0An #RKz;Ybt1L"7U`ԔP Z((()2,U A8AJ`*=@Q@Q@{v\ʣBp (#HaEC!@SRQED@4W* zz(R)hDy>* dPHʮXAq‚(Q`*=ax QCXx S(UFp*%IZ"Eg 755Ti 13q4s>MIE0An #RKz;dG2E 8*-QET7%[K$aHe>Gj/MCe|5>|D_ Z"-e,yqə8 {R"<w|v6}3-P bAF\(RMEx|x^kςڦAym^qu R#faI}]ŀ *AWO:?_GO7E65Ԭ^Y屹rvUzX(VPP b+x$Qϰh B2Pih(  GEJ8  b)P4qDQ0kj/>9[Xxvm{gy#]Ȣ3O2~bHDieP RGS>,|'~3.]xknEnD[D!#d$l9h,g 02zTQ@{ky%Y$iSPY~XaIdEY'IE-(Nx^+yuo!V#\( F 7Aywu 2=jZ(RE"'Fl;rz(1 "S8ElyƟj>4[kztKtYaG|`Ev R}'{X|w╎ť[q EF!FP#Qhp(QEQETk )#ʑ[2{⤢ 8 4C!T 㩩h$cC aAEBS(2,`5%DBFUFaNHaheg%Po(#]YKFv󳐓 #@ ]_JGpƱB(ªSmPJ4c;jJ(TrC *+,*}Fz)iP$%C` =(E ,h@P>TPEPQ 3:,v 2:J)kЌuS4 r(ua2E1#HcB"Qe(ҢBg((KCzpt!έ}IwyWύ;Wϟ|q۩Xk6ھȳ>9 %7*ӑn)&Q),~-t Au Ȓ8U!I?^ 7}My"ϗ%]ȹ. yF (moq$Wz((((/k?K_ ¸1_߅(c(((ࢊ+(+`x@W?CZ|HRٚqiH%z2ƪFx\ٓoxP<#Vq7=X`c6><VU?<}9L7^1[ń[ͯپc߼n鍽|KEA(l.uLlN6ƼW? 4ϏV:YմCOk|[&,>!n{qoVWUg+ i>&ɊhkjmgVkw]TVMѴ0#Zؙ&,HeH19m#/6~ }#j&. nIAb9(> OZ_#Lwh<؀HIrcX sXC^ bBZ"&pOß:ķ3\h=[~͞ [h ]!.O'Vڭ4Ҫoͻo-0yVEO uhz] 2Io!XT$MYk*O.{ 5ҢԶ1@ȑYF#pP+|k/ ֖1*-E qkGH|i_ÿ59%lYX"HFhBa -OGK"/x-Z4Y$ 0O?2d o#k^#x ^s!f"0bB-# Wrs_gL$UBl>t^C2y%I5>xAͿO\xnq3O/دaFJ nsyύ?Iߍ:۵K9Yy*.p0Hc@PNA+jC.u; g\P`ȹ`t98h`r S?e'_MbgM;okvxJevriX0Sg" ?؏sO}+wx֢-|^@c,_uxKF"{n݊iv@A  ~r<]:֗&+\]ںP<'2*ms<+[/'>+.KK]Z2VI#{H'ʠ {Wy:K|~9ihپ[<-e@HTrPYB*n|:g'ym,mMu}$_2dfCܮ+Ztvz]qHV 6*䀇 n n {v\;f, nw2UW¿k= W~#k4_<[H~T9#/w ;WŸ?sƾ=0jۚ-u T|[$4L ,9Rʹ1o \Zj_Y ү/g +rS{ʭKmXP@rw>о3TE2{K4 GCr'w{E*d1ps?KWcLJ$kZGMcim3h7?0 ȟJ|Oo%w-zVYji̭u!Dܭ<E3)]+fN=98onT)@J f .J\-ď׏tz6OFaGX/&#?xNX0VPruGy~9o:w}|y~fٻn7mlu<o S~/&Iy6fH2 n Rw\ E_VY("_~+-CP͂[Gc T_ %..m%Ctre`C( ;Zbk v~~?F`/4/ MbO)|l($-Y+RU%gǿZoz=BIL 7WPBjrv|\Ӿ8|2Ҿ"i`k4W6^omn /7t hʰ8ǩEtl$<0O1xBnR#ҵ|<\v<.Uz_UX~Ϟ1um BČZ]9$y2yUޙ3GQȒGy>TCZu"$ݞ8䄕Q;׽~Gt]o2Mkkpx$C1'.r[ /H}oEup&_0ܦ~l+0_|K>Nj*'(>4 \)7?0RH(~=xOĿ7\v>"m0*60,($*iς_xsVmmcǾX0|^~,|; sk=XJnbPb,-VVgyY-偂 |A UO0IwYg5Y4hKRH3K mFٴPLcsg_> |6o7?oszsI.>owLd! 
&$ _6+]nxLk6zK{[+x%h.N`1eB[/qE|Lt/+n4KZa k]aa5(]o |`.BumJPΙw DQcDRErYE~|][}Chm) i EpђIXБ[+ׁ/~-x;fٵmFUm:Q\/0Y72 JWO~A=O'cL~祒,lWx.rÌ uj4WkhnbYUz?y?5]!b7 ].mJas%V0_ n<0xWŞ WN L]<ϝ~TAځ;I~/|_j]4+ `$yec$+ګd oĿSK[4 ;5Qws4pdʐp8K~_s 5D}Ad.p y+v[$8l}Exjz/5֚643C"!D!b"7ꚞw=މk-ԭ4XYH' j+QUCm|f|z4[i!JrHnD8^Ѿ7xǿ?cXu ]Ku*NB(8Oց36o>?_ l%]>#u}6we7In[ mf i !*:]o?# >?xM3YP6m%ז; G3_ϋ?uƿ ih/n {X-hw*mPA]WÏ|\->.|+uxya)$2JI4-d,A,߈.`OYj7sx+;^ډۼ<ۃ0|s'Wo* _W1o>\<}wαⶵsnܞ,5+ qru.b0;ġC|3@?X̯>2.s|k>1ź}tȢXVbʎPg#ᅥ+ -[Q4Ėsf {PK;Os| eцoMvnI7cs^_AQp؈vkU>{qyU|NVU]-EW梾3njz:xw௅|$u58wD]ݘVoCNya'moZά5^&$M菺ORiB[jƦm"2$mRrb+OOo?Ɩ-:Kb`@Tǝp}\W :5(ުu/i8[t5{VjuIJvTͳROYNQ__>+^KMRibG,Y#VHZ'2]|OHWPO(G[VCrcNF%5&;9*j*/3S4 t?E(|m?gk:w,WF?#p597:Ľj{ y.%Ac',$aE)*J|Wʞ~m#aYacn;\]^ug5wo'ğ>?.Pѵ)twK<+R78 y!A$Ik_VоUn4\JLնa$A"i*\JgjeO6#蛷>/g_aϩJm-7D]NN;up/ 3\B2YFǁiĝ{&\ E yf{.D̓JaS(Bq匹$e%uG .yG+:ǫntW(@^&Fker]X>&1'~b$\7x4|V7kHu-Ιkg,mob*V# Ip*YS%{7yZ%} *(F:Ҵvս/}[ٟtW0_ $7Ʒ'% 4Ep`O͒| /:֓]ogM-ܪCJ)* mpS7GKZ09E7f̮Vv%qZFSu#&ҷ+I]gE~iH{w~9w OpkG"g92UUK`~EUKaa%^٬._rW#oP bmu崾_y\cA}'9o}/'+㏱ ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (44+/k Ww~+ ( ( (?ࢊ+(+`x@W?CZ|HRٛQE@Š*9e̳:Ƌ՘ g/WTPZ|; n yG7ONu?0Ž[i2(%U98?4U yN/ >oM7DVO6I=̏iL*g!=k^Ji_uoHм@+{:JBDI(J8AJ0FS` >'xХ๹WcˆBd [ U [|? |0n(#XtTG>¤\ˢ> |J-ibA9k:(-,|+?=FD<b;P(97~οo X׋55ox~)4;[Cp0dҎ>u^>.cB5l]V%+crT7R EUpNUTP8 t:2goXeX&[ 􏅭uC25i"ѢޥZqE#G$ ijRߠr[$#g(̠>)Ï~v_ '9?wkYt5'V(D+t'*5;jxl5!eiHp@OSCob0O~ђtPG67?]?us8kֿFh|'Tɫ˿g U \Nj]iH 6zn K$pIQ1F 2} 6go???f 0S/ɡq0I5弲_-T31 ʄ+ƥI3x{^XWpIkq#b ~_ n/tO4Ox~嶗jQY1ǘ#P1]^j '|kk+u@ pQT*H%f} * |[n}E3+c"?ڋWŭ[.'5| Ow:6[onV<8 2AʏԚkk{h%r684_zhiS4tY-: Bh߲oG×2vsm}krPb# ̍6+ U X3 (%OC@st~_~/|pxzu[5pzi<XJT.pH>qhg1+u=[ԥ<7GtMm31*1Tcj) WzaDZ?h|#7k w'P>#|1ں[H[0/H-U8Rʧ|}rG5 ? 3jjK O)Ǵ@U02Hao 8aЌ4 [~'t׉g`IMc"lM@JՈ'~E_VY+NWw><~Ȟ𯈬Y絸!IYeձpC{W^>Ծj>.']5w.BOrEuI#FqE͹E??k|eyq5b`KT($FRh;?<U孎Iu )rJ!,g :q?QbwK/?l/w,OirlXy<=u0Jq? ׺) N7m3_>1xgH][KiԸza*%w1"|8>|75]43Y=ژnSE,Lܶ 51Fbz⁩+G/_wv :lŌd]%T6, m|Pi:QZae.m ojרa@J3mّ*2GuBG,}U@ɠj}ρ>#|?`[Q46 @e-2QjkQٙ1Ӛg?ebS6~Mt xufr7N+hURJ u\j|ɮxjp9:zΊ忙|-%j%y \4-&In|.OLbx?7=nt%C&ۼ>N+ 7|?bvݷ=q@)+Y^|sRAj$71閻= n~0麾wefi0t\BFX1*#xᇊ ypYaIn"dR(')8Y?Ҟ wFƇz! 
d^1Wo ~[ (wG(ªSq_|Q@>v?`?43>5i?dO>:4fѴ;" U^[ :5jǙN1gIN^?~%k_ 赨: ay.>bs>SUU* 8RV%ƓaRɾ~zXΏJUU:s +}OD>z?7~9-o^> ?޽groMӅ}7Evbskߒe~;^[d%yG(s[j>N)o'Sc6SJ篫^ W nzF$XX{Sv{o|u7SDu6C ż6sA8G=4 ^Y,p"#;I)PKgh)^YY"Gt _=+] \,fΒek|V50)_ N9jBJ7}{j~z t~+Ѵj7v$^]ԭ#+ $y@yڹ½O ca_^\9J:<-F.*65o URPչI5OKu?,*[!Kh.VE_)ݴ .NЫ@$9P*F $ġx ~+qgUN^[GEd[yCG=*T嫻;71QE|i!EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP]q_Mv/QEs!EPEPEPࢊ+(+`x@W?CZ|HRٛQE@° CFXH@<j]UUQI<SRg |/jE߽`Dub ex.nF=_>4|8wPo{-bE{I+QrQfXW5ڷ4k >,Y?.xCէ?ZjjsPƒ?͵# הW9OnkMe?h^4s isOo9u`${Ni&W; oR`e% k$zņʑBUsoYjo GÃh ss*LJlTB C94{%p'Q v:߈V]={\@V//{*p6d&O|J_FU4%|r* ߲O/,-u;]n[;=" psSѴ>6ckGK@ 6 Y }Jn'~;ᾩu!B=s$j<)P㝬CAWc?.&RN0vH,q #pA.|5WOxM\~>ѼUr"kwP0@~U'̤bG~OKqvy:Ȍ$MP <2Jw'ώ`K{]:@9cIUE qUT[[I@xCZþ#PFTnwcJt>׼wGբl!{BЭo<[|u^vA#?8~Ǿ kk׍wvyI yl8n]_'5L_en5A㿛OD3~P31jX:YfL W>9.cvo5i R\N,+loɾ2|l okP(}8:V|'OwYa敯C*( #]A Sba&}UO~K#VS݂L3gdR*Hvl]||W_[×^)ִ2"Ȭ\ʍ6H?U3N(Q@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE((Oϭ S?"է#ʩ񰢊* ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (44+/k Ww~+ ( ( (?ࢊ+(+`x@W?CZ|HRٛQE@°"um_v𵶝o2Z8!dp.břGVd~|-#[:Ԯ,Lg#Ov5Z5UY?|+AOŵŚqes y%TƢ3\ι˩>E.w{$> Z'OnXI#;\oBi 2 ;?x^1׭5Ӭ m<.ZY-oyb5%~U/4Su].T~cok&gc/,/tQB%dFvpyFC}-෍4^[[>(GeZF۔gi*T(1,͑~> ]/q sGa]}T݅KQyDxKƱŖLgwLdi2d`2POMgS~7֫ox.KHvFQq#$Qrg_/?\Yݶ=*2GnN~\{~Uxc⿏qe>oXbgyb0Z5A=(}i/_җ=;YpDrnn+ >d7帖 i |ͩH c;p =T|7m{ĭZTV 2є_/ "FPʼn袦Rmݍ+QR0((((((((Ju5Pi>c^t?lk= m[5Aڶk (Q@Q@~YV}zyU>6QEYExƿ~+uT7h>6n%vFH~k׸MdPJ{)3#>}.iRtm'd:zܧWk%{tW5 C_SF)#W!??_oS&~Q_?/ NkiGB~E}TɟW5 C_SF)#Q8&U>g sB?kv!??__ɿO?aE~'\п5;Oj?h_5sO oS&~Q_?/ NkiGB~E}TɟW5 C_SF)#Q8&U>g sB?kv!??__ɿO?aE~'\п5;Oj?h_5sO oS&~Q_?/ NkiGB~E}TɟW5 C_SF)#Q8&U>g sB?kv!??__ɿO?aE~'\п5;Oj?h_5sO oS&~Q_?/ NkiGB~E}TɟW5 C_SF)#Q8&U>g sB?kv!??__ɿO?aE~'\п5;Oj?h_5sO oS&~Q_IKSQ֠ 7ueA l8rqzWgO~t6zX >F!7;[8 AAOK;Ŷk>+* ;qYK6{=Q_}QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE_/a\~]pb3 Q\HQEQEQEࢊ+(+`x@W?CZ|HRٛQE@°h3OAb\(wf休Ʃ|ǻQ_ \gikzu߇'eܱ23t*nm N=1C//Ϛ(߈Z:!G໪gpn1T-5Չފou熵 2ڽop MlZ6QLiGixSNAWI_!#YȮ_?~|K:GğPd-̫`IXװ]l{-|?[oo?dkgQ]xq+6ўX9>4|6$ ZNHJYA \HEװ]lz"Zާ_{wvC!!VXʘD,gJM:jIiGE%E I[Eyw/ ~<0xY Z8R%rH 3RAj>+?m=ǂ5xj@- nZ)Ui84r^utW|@Ꭲ?$,q*t."V 1@"IO2M_:z8`'# 8$2~\~ B+{y<{;Du7Mr)iv{xZ%/}]888 +)*x VCEQ@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE((Oϭ S?"է#ʩ񰢊*?k8_;M}}!\o?OpQq9L+]k d$RFJ:YH 2ca}^ȱA,p1<&JQ>4mkY񮙡:Zv -&vDnR گ8'9_/_ڏ |(qkV3E4,^ L$1 J1R9 E^r^R9Wvm+ur1|a1㊦JsPR쮓[;4k&BM'ZeALאR V9?ڛPφ-gvU%@n?`mF(߈Z)cbhEwaoǥ:|iR9E Uv6SsN.Fyvo?>hymk[Bch}]wӜv>&w;zE<34f1FR7,r>Sjpgb)ԫFj斒VtM5s1\RZN厱wfӲ~NδWx.eC2t^Bwgۻ8=^ӼGhLm^Vuxj}!dV8Y ' 2[Y^dvVW7/T㌲Y Ҽdf|Y^Zuh1 Q'%t\]hm3J+_߳>CkwêZ#"eVd%eJPx셬|m7 i5vF,1fM9'П1g'do*\>#,P~*6K3|󽏏(>;^(ͨ[k\Ekڂ)*I,d*NGJ/|_Ė^ ub/&$81 eTܢ8c]{9;'f}Ruk 6y|h?kvK3|}|[E}m.-mQ*SʡG*1 5 W#6qfwIɭ9[#veD잖W^zE$tӨvsWS CY&I=j:^5ƺUew:6sC%;*]eny#W|w[SYPHlu\b69`)bCˇWJ@v5exPH-E)Cx8px t몏K;YPK %e.ڭl[X~'"S]//𥵆^0/vzf=*,AB K }?d~ZVH'Ԟ{42O+aJTb%EFJ/ RZC'YhZ MC [XDۯ?C-[D kI-ȏ$K$.DA#p_{D[ ϥ|Au-%m +2olżbۑ?:cAK.t.ݦ('y63kW^#VGn-1<&mQ)kG;ƞ8<]t>]X:0b{`ҖDܪNK h&iBm)G<Q|Fa zl{ `â6Tm ސ9^^-7Uյ^Ukt;pјe্ oறkpUm/UT6$a2 KR? 
c7㶳c\}3L-m#nHV`33,@\ cc^WZ NHbJH8`P h|oѴ~ПAEg{r@3,q7mNry@+t_ hmmj$zl0#ne6S'!<:~*-~!|9fqg 5 YdI^݀$n؅r9Skv ௎_|'[=ȫudc8$3yy:+O0_YRMB͇̀4{b, X)!27x x?{k.t-z[,cdUWx]wyeIl1 |3;{?,j' -'o!Qٰ A$bFub~&?|N=: B8ڛ3JAet~B$V-Ƨin6C*]0aTFuf?sj_,|?ΛiO_is \Ih.Xo̠cEWĺhuj&ȵc#pgbKrZOcǾ귺?i]sMN'N?~ռ[?Z[n}Fd(428T*ARfc毬|"|z爥mh`yL:hFLo‹CH1 "_ NYk~cKd}b| $xAYe x:|1s}VL."r #Bg(7}^3w<ej?Eωufeമlv>]mKJ6f*;YQPPQEQEQEQEQEQEQES[:(v:ڶdy{A6 m[5QH((?T??_+>Z< (?_ÿm_7/j8cDO%Q#_|¿L7OcO{x4T7BQo~gWӿ(I"O-%'Ow嘜$Cr岺Wޭ}OG3L6]Pb]ٻ^KDݮþ'еͯfqp˩i]q#F9 $I/k?oW^}>|SGMp-a0AZݭ6(dIFKVVf0#Ak9Ry^/ }ԖI۫> Ĵa1xW96w5ʜZ¯jw\\;]q)'t'2O]ێkMs%7o)oY7+tb{+ 敹)$?|8~N>#?ײ^}|08}aҽ'C(ux,tinC X[bM%pFs&g<}LN_ž[ssgٵ}+#r)h'#\_޵|}YUem've~,u] =ē98zRf Fx?('(:c6-ifIIɯ*~SUMw]7K/ ӸIaHhl Ř`!ȴOڌ;Ze}obs(ul g5犫*3!)9s'mQ[Y.l}T& -(ևU8GꛓƣU̯v"i0hXg!~l"#9<Dhķ 4Rn6vŖԠ'MxQWm,sjQ #4iQF ];k-{'7Ge.ҡV]`YJY1!@W7Tl*C甡Ȕl]u7A"A5MO4=(Z]fRHے>~xo+}:׆^k۽JG+XF6CD^X '8x?KƓxĢ(_[k[X3ZFXjOs33K18?ŝs෎|mHbU.me {(rvC >U0JI6z$#Fo/rnmVWm%_v^)Vݵ+^QΓl(}~G ?Z~gjB1mO><[τnKɄI $%9yp2>c~%K9Ǥ EswGDw e3A\k? gZBTRqi%=8&ң U!.ZjpW-m|Rww^/Im)Giw537}+M|?8TOG{M3mZ]Oly|=g'onz()M>rxɼx"D<\b?68&cUߌ|C /<-狇4o%({iٝx'bs2ZI)q|Vև|>bȓaK490m'ޫҪ{_|?WZ΋kZV)i8C<_2,.#$~}PKsG~Fk a9P}ċxD7K{~Z/ z_~񝦊gs+FHEOȬPI%YKWZU9iQ4Փ\Rlbc\|)BNI|J+{ /+c3{Ż?ͳ[˷c;_g`O fc]m o_f[U3V:rWr]>J_~ N_ךΫ/[خItf5ir_|Ğֿ?jߏbl7KyMzdž|_ [6?/]yhcfg=^a1zغvpʛ.罴o](*RE}R~?X0AڂLJ2$24R6x%~E6L[h:|);x7Ig׈h^ Z/#ޙpiJ^yh\K$VWQgj7ſh /> xV #ep>p2+q b\!S_9 \Z3*_rMI3\f?Js'ZU#NrOj$wWWVm}g/3i^o}ߎ7y_Ut꺚$sxHG..a<6%vM.ӥ~\xhdWޛ|Oo.x䢛v+>_H~pkCGs_G'׹,CJGQ_g8QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE_/a\~]pb3 Q\HQEQEQEࢊ+(+`x@W?CZ|HRٛQE@Š(((((((((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEqEgֆYTQEd/j AGbnN>IG_'0+< ((((((((((((((A*Art4ȴ/< Dk[KYcM|Epr1()Fim+z]3c0MZTߖN7GKz#~q3{BEP0' (+JIElz#Y՛VMտV' OodUcBxTHQ>r?OsYpR?k袊;?Š((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((( /?& +_(:B(((ࢊ+(+`x@W?CZ|HRٛQE@Š(MX#pREPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( (8S?"CT?i|l(kޯyů=ͺ[->]N?õOBHYJ ^~_}ϑA|>GTʎ4#_*GP?/dL?g?/@Ol?g?M Q?&?G?dOG2?BTuI"O>}!s?i+zS*:$ =ЩChϪt0A|>GTʎ4#_*GP?/dL?g?/@Ol?g?M Q?&?G?dOG2?BTuI"O>}!s?i+zS*:$ =ЩChϪt0A|>GTʎ4#_*GP?/dL?g?/@Ol?g?M Q?&?G?dOG2?BTuI"O>}!s?i+zS*:$ =ЩChϪt0A|>GTʎ4#_*GP?/dL?g?/@Ol?g?M Q?&?G?dOG2?BTuI"O>}!s?i+zS*:$ =ЩChϪt0A|>Gb_zAll:H6lyBm='j4 Z`ROzBITzq^䪪Ti9`n*zI׷d{wۡ#dqمE)CXKn(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((( /?& +_(:B(((ࢊ+(+`x@W?CZ|HRٚqj"*ȬN9 \qiH%z2ƪFx\v3ٯx_شv!5\88"2bY@@$dEfooƟj1kaXvOp$^r 6AyO:?t_I.4WL/oDpFmO_t_ ״Kkpmnt^>`:jAs·7Z_xM.R+X-m cm,t ~+|oVQ&IdiCHTx  ʲwC_<N:_2GhihUl0ʒAvYtk OC3*]bVFϞ 4~> 6d&.STrm޲.cG w$_|M/¯w7\yޥ: JI O# ܫfg~oj^բԢp,L#|RHW pq꿴m;|Dɦ˧Mٮ DO+s#ΟoYjo GÃh ss*LJlTB C950xG@>!|SFe--039s'z8O|RFe[<3B\Ul6 5MF-m3\[HbFT`9*Hl`^/'O{ /4R¢8{Rp1k@o_‹Kq{'%$Sܱf}] *6g9ۂ _9 |CцN-NĹddu2A}R}|8y4teK9Kf5d c _vVu#+"QE@Š((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEqEgֆYTQEdQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@_Mv A5W+;QEtQ@Q@Q@ࢊ+(+`x@W?CZ|HRٛQE@ϟf>"?{ V!M19aGυ7},QY#AE(.vvTp_ qx[,qft{ tĀfV۝_mMTbG_|_X֗&DRieii&F@]$w~*e''v4 (aEPEPEPEPEPEPEPEPEPEPEPEPEPMnkt 
}Ƽjٓ5C{POAڶklsDQE ( (8S?"봖y ǹ8j?7l11QHNM}eX/yƏ7}jVgEvVo*y4}jY}eX/yƏ7fqWaa<??e{g^?φ:h.DX!UF"V 1@"@}{z쁑J, 4[??].c\YG<kc2<,8855焾2Ԣl30$""wlpzBpz]Exhuu& cۼ'2";A݀N0 u^+/6:dӯ5&'bR3JƁB=`= ҿc(2{yw=XK© A_<MURVDqF##$) P$`;z+ȼo!!MEW; mWiӸQ_ w~9 "M9Hh40ޓY)e,܊@]Z e׌߇ ]-sknYwK3 !ܬ@z}Eq88|iG4(+HHA;R8; O|!mf[yZ5%U}9 Tr^ú+Wᧁ<;i4Y%bg,T0uB\9韴g-ZMmWGK;I/#x:'[.pM] 牼Gr,>?6r 3HI'ҼmSՒ@DhdHJ BQcI7z>|GLJ-u' O*$~j*gχ-9a49Ibdgc"DRۘ@/WtzE~(/❕Ǿ4<A 0%Ub>TkF0)QEQEQES[:(v:ڶdy{A6 m[5QH((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٚqiH%z2ƪFx\οM+o?kRf{>gsqܚ’bT}᲋*N2MI_|; k:]SڟfIZ>g~c<(l/L+ 'UsC^-ds$Z,dIF~Wʎs[Ԛi"+k![Yi|0N|Qs7g?,kQZwwF$3.vF<~c=վ-mvu6j-G) Y7B|3_׉,u*7wr"5 Hʏ/hm* %$$_o=d<յ]RT徆mth*J ^+>xk,7ú[w{,D0ccxV/]gL~})rI%F ߘEOVj~5ֵmF?"-:IJ*sL#R*DjG͒SM}O=$޿iMC\1,ҹb1 |1 ~axb}[chuU(`0*<G-/2P{Zh$hy Üq1"+;4]7{o k zPe͍6mH$cTԯK: ׈haΑ,vheX%Ur62 u5r 5]xPUʆALN@qPkQzŜ֞/`Kg"{"`beqyϋwC>.|)լtֆh5$&x?y*HTG9'=YVi7@/ umw%TS{ [#8*g1z[;CC ȚφqU5/g|AkritVpO-rLIߴ|7Lw/ xZ I5 ɽq& e` hkVvgW22X|YkP@+Ǻm?k xSjZ>4Od,P`@G<}ooxzF-Yi8ܐ-ǔ_`\ 7gVƟڷ5U4/ iw.LA>TW 탵Vee`lI$gO֏ohA[? gaUu'84{|t;a;uQkwG<YvdWE7u$y4|7L.F_̣LiGMūm_'oDLW1C+ȷfW *@3֚xhGýĺUk[\AE3@wƌbWSA8 QNicᥑ&/YU,XKr[]Ȅ+c~ AVŝ:$qT 㕷fAyϊ:߈t|+Y5 63e.2sěxT2HC)@jc=SGo|\S' ɦOpe9KKy2 bL|V]ƽWGh.Lf&=;yUXa%@yu->`?Kw|IqmJ9`$Y̬p0AI_uox3Q[e u`[%UiU$|zğZ卌 >n~4'zY?;YUw%LjKƵG ]6%OI 0ܪ,ZwDs~0jޫTد_x{J[-V'1]+l\m;ό|i/𶩠j0-嶡o!VR!O̸5??GKiGxFO!&&>\+7 U+ h_xIվ*xNyV 4 zA;b܅T'u55do]߶f;; m2I!kX3^J|8?ZDD۬-%3h@2P?o[υ ~-%Ř-6}Lp_/|z^)GA;8j;|VO |MV1v-+R~?xoFxn1p2Ylin) <1S74j>(_xVѬcÖ+ip+f\J/u/^i4>:ٰ|2DI&v'kqjqRfm3?}նm[T@ T۴>c] h3~h^"[&i涔G6uGw W_GX2<>k{Ҝmy"*  fpGo5kqfN]Z\#.Ƣ2As9C 7N6k\~%`pv+(((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPLb o=eXIIcFxBk[#LkuH<] AHcGw>W<,ʨpH͟(sƫ.ѫ'Zև.|M3Eφ (^B"nvMr3+Ud=oĿ |QwPm ZK6#1 ȕ%<.rBH{x?xzβłXj!N[Drd;3_zqo%'t.[>y#c:TLђo8WBAl/|<Η^ewg/[\JЀZIv&G"10Rc<=gKCN}#٘'/|W7_|C.oC46v:Y&PCC Fs Yi|']?1Z$6W3(ev1KKx]b>t0-5)C\=g _9+ǿ|-[vF&5;m/Ǝz3[>;)B%OU&X+|5wEg{ q[=;_íc|AҼOxb9&Y,"h0KZ~*Z,gQ\?$ѳ!Cs +qfgri4[l]mx3~_5Ox7TMј*tG #=){>~CDž>̞[ eխ WlK嬥X`X:|V~73o&lRT|$K>o[^O *ilTcj7Ig/7@~:|MFwM#Rʑ,2g F_=@ϋ_ _k_ź..9eo O6Sq*XDq_# {Z 'WE'$KkDeHe P d/mD.;=Zm4_M.Kcs |[~m i ŬG#JkMVBm~C=E\.ۦeE,I<x#>Z)≾$Pi2]=^.hнj,4#2V5s\A5E |r-|Q?.\F'1eZ[L"t 3sqbx@^k jVzmqm2tܒYdc hπ2xB/}hqd5C1v$/̻_/W'o c4ӧ|U/<kvs;%A{mEk=Ð $|wCg{/^k g-wkse\#[³F-b]^^?|A'7߈'qsYd+{}̨xb=Mlxoÿ|Z_^sqZFJ$e. W/WX? ov7!I KIԉ#g.koPi:~ cJ𔚜z}MnGHKsf&(POT?|wை0;Zc;E2;DZ&`deN"|kyRxĐ|+Ծ3f"Ϣ˪YZ_±v0\ܼp\YI4w7G Eferat}wE|>3H]YbF;T T>Q_(|Vf,;jlU(tFAٝJ>'y~8. }Z9nDoF6f`CA.zNfVlj'ܧ_La\3& gc 1P/:_▫YA$RCZZ9;ā`\,H8\(?ڟ?mO7v^-Jj/tm@ڍJ:@ Fł57hχ gƟk{ hqC{w׷Xۼ4+f}$I%v5&w~ʞ'4OO] +g6)eqr͵cYF߱X3>/|8)Y뢾Dn_3xZ[KRM }3xFRzmm#SgK ѷuaGPAC1>ҧCcSJ4iwwڶֶ?@4_I״}cBl.Ioui*Oz2H$V|+ G1|A~(|7viBEXvd[o@@Q_00Vfi]O`1JyjCuiG4K(4B((((((((((((((((((((((((((i 1|;KJA, q ylOcpiσ.RHd] M^a:PlW?_(Gþ;|FuOu-:=VCI̐aWq\>=7 k?شJY4rΪ:sSf3+Z~#|O]ZiAy<: loCX 6qxƺ E/5̨D xӨKai#<k4"Y7/Ao1_`'oKéCm, $nAŅJX(|6m?hDPוֹ-s;Z|c-g m#Z|<]os&ducۛvOIEyL Ch|q72:|:jւKR2m(E,P6qV->5'9a6WmmG<]WBaٌlG 0k?LY> xf<;tZVScv"71 c``T֨K[u=#Ś-XGldn Aٓo:hU뱓>e&p%lG 8NW}Ec |+xاu{{2[@tHUd@ZG!a|mz~[mna,tc0f݅*s\N/׉3sOxF]6?7tźxeݹ2 X(%ǯ5t<{ F.eYw ã057<,6`M3ZL\0y|$y$f:ncu5Tkp*FQEQEQEQEQEQEQEQEQEQEQEQEQEQEQENJ'ݬk·>c^t?tD? f= ?V`4AER((AuOkZWeE徸N7Ҽmj"00-j#q%| _&bOCҬIN9- `404V8`NkV6 :1apV[i.M9N~x>Ce↭exW,Mmhl"~) \saPXb?hi6K V)2)I g5ZZ>(h8w[R_7IuH$R 2bYpWr!*)h@kk+Ya9\##TrϒC.n&?ҿ _f>׻os<1/_u*1k-g][*'>.z3}cE.v>>?d~ͧ,5x>#Co5H̓ &^FgwIP?*k|/E۱m'[w񎆸;N |ۭ6O?}|ówgm9&cګPS߆?7_?f}f7vc91#i;]wʮK2Ȱ"!)R1)1XGq |S▯3ĚL Л 0X³J$`g ýK/ďi9'|}O;?G_XC &|?uIMMtѧb& ?wO? 
WM+mQj"Y ܭճL1$ҿ@裝ςHsF\[@ᔾj=jmc`{x|o ?S}+,qG+i}V~|s?h_2O$𮵇|}崗|}0~3 %/O >$M Xؾ\~o!?n}>>r|Lc?AZjP}+h&͇XBr2I&#_7o9-kkkrT͘;w ڊhwaER((((((((((((((((((((((((((((((((((((((((((((?jj*%h:o5φQFO-Gvp@N+:)m@]#o iW0_—jwC#)䬑ʒHKxsW ]j^'ЭQ{x7[B=F#`Ą>exz< u;ݶqVe9I"v>NQ_+UE2Wv?l%ԈvHʩʣH,jy "W.W** 8tz4Ҭw^,9E*ʊ@ZU5翰}ڷKbt_jZEmm܉0 pK}pk_4gH# hO,_Eoavn|AHle?&1?c4_gijz'UӵiamC,InK[LO |r>2yW+KV.TK 0+.1a}w_?>WGo5" 6T4L O0cN#}9M&/? r.묠Қ@?T``2w3}W'mKK Kko iڦ l x \pWLVg?RēC5k[{M#Tts(O$l Gsʧۛ#hֿ`MR8{&ٗffLo+T0{7j-u-;/1*[!XL $ET2HK5;QxH.>gj˦)-clOPp $6k;[3:z>ogkw61ti j(t_'ρ5|5PR/!{w:o11ldya| !xV~jCTҢ;y\mY`7ڠonrA TZڻO6_;I5-Yb(2ĪWL;N:UFCJV8R=W~,PůjˑuCe4 i3c)$Ht`|Fq{k9;4;CL?_jZ,$-̺2=m+8bvzxQFxkĚDž +Z}IJ!8]Sd/ k/-nu<5'BڛZ1K$(*dB~H5ԿLpK4\ mEer(Dbekcm} 4G`C ɬ;PMk<~ڤ7ׄm"!P6In$,,)F e),|) W:c?u=Ε/tI@$3/Hr5x'/)ūHẌ2 ҬČr$W?J_ DԵj_i"tYg6,*#!N-;$@|H:RH{O1{^#˰%WbhxQ!j>Sz hM BB΃7[#ߴݨVXxƟ?7xC֮4ZٍB;R*q)8܎xW@OLM<=mӚ$67~Tbu#JYb $GG(]|QҤgq^%FV6wAsbn&=zޱ(g-bռB/Qjo,$S}Ӏ*v |&bJ:.|FG퍩|>{þ3.)/x襵T"'EK);KI[]sg|nyxO_O?kϫx^((༿vov Ld3aJBUb;3?؊Oi_ 36zcd`LaĩO1O#IO?x>RgMPeteXp~|~Oօ{먮l.I*,3)g|(B6ucĞ46VHto+?+8]:hy/k |3nvxRRoڹʃ_Wៃ>x#Ly J2AHgy1'wܴ||/>M!:ׁu[K@D^M NR35;\tDqL!|x~1|-<9mmFHb_k|Ȯ@gi c{Z|^.l𒩵6Hا#>79}15UM;(R&zNƸRrQOUԬvv7N~|:j5$\+-wMe򣄖v*,23|#|dG l!Ҵ[ikk#ey[Lʥ(Q.efVǷӫ:1ʒ{_m魕(\((((((((((((((((((((((((((?fK~do" [C#_OY rTƚCKx&Zq1JG*ZL``jO&uNj|Ie|Q:Q,z}icB^2#c ylAN:6'Sό1^oN o'6B՘fIcARN0v>% ɨ[xCQ 1j_ Unm/[^Kq HI$2o5B~?b~:=|&G o:B<*\xSڑF҄L~%xA9|w!cC>Oqܡ$4 &M ϟ4ox˧i! s弾2ZeT ;>8| O>uW:m򕼆mP;;0$WNK}y]ڷIwIZKڔ xEu푊Wã|]SVsvMQxV`m%"91 D@ 9X5/Knbk_ԿOzw$ֆiZYZs{Ȏ").n!{M .߲Hwf_8zZ>4+B5Y[tP;Aml%|7|7[ti.dowr%JEhfı lo!7+|/>+|dxþ(Pک$Բ9>29_sp<%s-յۯxX^$5n&8C"3 ^GG$(_ 3B? mnv!/綒{Sjur<m4+g> ?9xWlԀj\ M#* ՝|ᆟ&MǦj1fLg~5o+_-Egoqn|۷s[-NgFt<}#hhٜv[=~u~ڟŸſ|KkE_j h-& ewJ-`6@O/S'?o/ |<<}+D凃NXk*Q oC/J /#k?۾g<{ӊ?d-?㿈~E>!`)K[-GR 3,n!*{{/|R_&<[ili"4lV4+\lE.Ͱv? Go_n?};7_ۻds8=>>xkKL~VQE]K [ywj>QTv'sG Ƨ/?e>Er4ަ 9AU}W^=/Id^}YhY>\}3p<9&u??~&zORMKY vA -)9&chII#w,}o i>v}~?3M6{7QLG |i> ~?gO^'7[J.0y`V}rOvĭgb/SRدUV7iP(7˜L$|G]x/ğeiv7m'R7 fa?]YWF+(?4ͫx+_fCgj6"lS",FFbY<L;~/yu5;l/*Օ7 - {=aǍiS4B"5mAڳᯇ2J>+2((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛQE@Š((((((EeaH Z((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEV?2Ry8U>U!.7PħRzu:<u:ryZ++'u VWO?'5hӯ?GO?.jY_<_~ӯ?G'\բyN<_~O0Ee?yNasV:9<u:ryZ++'u VWO?'5hӯ?GO?.jY_<_~ӯ?G'\բyN<_~O0Ee?yNasV:9<u:ryZ++'u VWO?'5hӯ?GN.jYq^Ispr}3ڵ*\ZŠ(EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPY,l[x!L91OJEQEE Rx+xu7L{=1nVg~GWjbTkܙh   !'~xZ4f;#HYX$(]%*>_.kػ65(gt+FmCƿz# ;i.㷏O2$E$RX)#aU8E\#?S%얁MOnr9|I=OVѼCb[^rPȩ2%b8vI2/O>#Y2ɾY0470̬YV5e~V,UOUwߵKXèeZmk,i,[A q"}w3>Ѽ5M{?Ik5pZBsk뫠ChYt2g0sdꚷ [_,8$G#h'^&)=4k8M"OxaHkHH2q@sIFM ~v׌gutQH!mȮ급U8Ҹ[? >"5.M{٭#;tRS̻VMRk:OΪg[.䳘ؓhw5 3:]q y*Qs,XǒIEklC V{Ե2mlK"D#E+3ɪ?_}CUM"G=в܌Kϖ䩿fwܞgd}xD֍sz-6?6k[rĀd I x]Ú7n[-Ŵ; ƫ ǀ n;K𕬺φ5YMHI@Z9J|/*];'ď Y :իBulZD!1NO3麿>x3QƗmqF$0"pASrH~)6 xuA~.J%A k _ x[3OK{#1ތR"!e-K//tyO/|Mh[Px(UnOa/G~![ǡjڴr,VDgiq)F׼I|nKUi~i~D)!BK |?-r/k߳σu:KH|O XdQݎQM~ҵqs">?jZmEIfE[\CnN^5U&*!=_%:vhrxU$ gio<˰Si,@<ف\CMu"LJ?g6}jQH4S판KA2A w10-RK Iڃt3jW2mqmX''^/{0M[yc2G#FayY#U-!Qqo[[_Zeks \(EX#uT +ox&stRI ʓ; Q;>_ecfoSFXW[xfߴ2G'cҸIjj?1C:ܽ`q"Tˎs?kO YAui,b[p-g)+mGBA_ l>Yx+LYdT3qJKW*qBwwU!5 6;[JHf$n+)) ws]I<9x5oq,ʸL!⿴E+ -bݡ7,DC1 cײᗂ4\hg. a 6"N[E[9'9BAwwV$̋$rFцUA8"./xF{m< As,H?P? J͢ӺQEQEQEQEQENJ'ݬk·>c^t?tD? f= ?V`4AER((!\j-QBFqUYV? 
Z$QE@Š(((((((((((((((((((( Vun8Ïc3\XC+pOW+Dd??u5hQ@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@ࢊ+(+`x@W?CZ|HRٛQE@ς~h<% ܗ͑ y׊'ᆙT:iodREg9}D6s8kFDOo>SWмGqgs=sDO#hE d2@c/EwWoxO7#1La ckuǽQIݾE ūη \Z&oG7 m1෎b~-|-tzoj*aRL6Al~WQ?jͿ.x&OCФ+fR7\#pCvWigxwg~$YK{Xb7(Фjjj]tzc DOx_KduiY2|E񮘾OtSd7[d`@| edžf՞&c躯ׇM͖?"Y>#uFxֻ V$.[_?/<[_ ->lHm#Gfr8>e.Y. d(i'diP|߁^,~!m\k5Mycvȣv u r9|-m>%sb I濸6V4HPOǫZjqr矈_χtK4OQD``,W8 :`?kZkž44)$O/(@$vydTh9QKMKӭtȤd8K :ơCI#ef<ɯ cSuz|]fs .RA*v20n>It6V_T >+H_ج{w@x+Â@'>~ |iΰl<_e`6kNQ*>/}]:x/QkX$㩵xo% ܎Ns&yMO~ }zXGӵ=U᧙JD :+۷nޟLGwro.ex@ZjÕo<#7 Q)x$D`ȇgA iҼ44h_) PJِPFBX!Ĩ,ߢj47&s/~X| nnvq]{ijyXi3),U€ 'z><)d%ާ𼤈Y%,B v=+6 #> xVoCV5j\,Z3,eQF ?Z:(niQH(((()@yճ'ݬk·l7Cj٬(@QEQEeZN&k*BwA5W= kc[|=r= u%)NJvy(&FQR{ :+??M ݯip\X,Vd8mg;TA5Oßpup--[[-hH$a%% )l|++m/|>wn-PXJ]=VC;F b$l;χ D:i\Io=ԗ7r`**[#H JaERP׿mُ~-[kkq$VSE2ҭq-`Wq5㟱mߴatW:ޏ5H$H:#*ApE_v*+߶?x;*X{V uoiisyf#;ex"tV%p+5~1l<-A~X%[}Ξ\;̮7B2  R֬w;:+[t}LnjKm\J֑CevNлH${o>wíbgKy JXU7=4Zա]I<,|CՀs,mk+||! ݼ֍pcQU('h; :OÝk Ut?O54G(?Xޭ!wGOƌjԠH×ZId[^ShJf9Or-.VK{};JeaG K__~=xk7O|$z扥O ė:Z'tU{amg6}'Iӟ@Il{qMWw{7UOg2TqU|k Tm+fv':\c}jqbP F?ҽ;c/xHHnKT*pq83ĿCxokj Ș\ 82oaٿon۞3^Ow" 0]# /zxժ&ϸ*B]c$zV%w_*=-~0Q ;dVc V]z^JGbW9mI$7?q՘̷?u=C/~lnZ^̣G&ҏs7$Q L '"&O9枝+9]|Y &|>q#l[NC$q8WVD$U]N:'oAWƽQNmﶥW1%Ǐt{iHnK!!Sxj ڱ.>%]-eH Q.ci$Cr03ʧj&g_*7u,P9fdG8;A?}S c[)⩿iKZMzh.dӠBXlqC7s m[M]k!IT!&DWm/](JNs]~HԮRV7@ _*B|_T_z}I|>Yӓ4M[x/%kioIE;ibQdm5C'O,x-&dqo7)* .QI,=?otH'XWhb g涷?ޔrj;Ex7f*x@nL L=B`#{$oܴzPE?yE{-GmNPϥ~ h3ya,9TB99şYi7}O/Ȕ뗲ア$ǎ=+/|{yNn^4$ǵPW.Q@uWvKcF|(nD&G獸qwjũBj: JNrLx0G>kߛy/kK(hM^:T'NTѵIٟ<+Eqehc?0OJ&4 t QLd~>ouhbY;VvWP}F #tR >y/[q n=+oX׿'-eȂ6ʯ mTX.uSwꁟ|$|6|+wdw%̏3qE! y#5 ;jUy/_}Gÿi:5ψnX=}77V=#n'=?ZmYwss"\dpF͒NHurgakapTsBssǢ]m3UN6>Y0jܱ{9uo-#B*r1ו|sNKH '2'qRW߶?{7ᾣ.xue.H<O#n ⿴xO֭a0:S\Dhgt$\KtKi@YQcWWh$r@?V2|7ڋMj=']x.>YXs 2/_qCxrxGQ2xw _6/|\*_v} ߊi ~]π[oRt1jWa|b@)f-y=wW'^x^ѵ/4p]9 2"LV*th-_,|:׀u7"aky"$s˱*cY߲ߊ-C|H+|m;-fiK-edI.xhVbUd*JH_1_|MGIa2x[מޡR$0e`0P@>Q#c{mX6n$'FE Wǭ+]j!SQmiVʪmjY;ČbLT;4Q]{ƾ~x/⏈} PLmp\CGi"gOث1 >>Uf/RMO+x{O? ?>_9.u%$["&!\MmciM~87kOfh<i|8  v /w7jPI*ʷB R'F#;vh 7%}Mi-H.qPmGdO~ѿfkGn<=iɦg@ѹhqA8ʩ{5Gx^OSVz?2,>TR k1⏏_.>QeԤ+pYhbGO3u[>ձڞ2xP1;QVč6Ʋ\u8yIQO:5dp,<7Ƿ6Cs0ɿQ%v7(峎3^[{xoĿTe?qqqe˾dfQҚ~Ae/Sz7σ'D,W֭Z4HLNlƿ9\'-MIdURϺLe>-x,~5]xš25WH\,Dѷ?!dVƇ; W߳w;q]FK-3?hyǛ6) (߆Y7f |7oW⧍|iY^;/mXe"(7;)aB'%G6x ~?sQHo)-pv\o8~4>(8LBz-rVpڠ *@Q?ßoK|fI]Z%Ir u`/3ξ!UxO-r?|D񔡵K }Bc%w*"Mɶ >h{Io[]Hg XD`AVErȯd/? 
4ٯIߟi0sݮS"|xϿf?Nլ|q,̇O`$PKl^l i_~ 7 Ltalm'?ƥ!8&c8ٓ:f~_,36R{M22*ixב|;0ߋ PnM?fN|{gw>Mz/3xKyiXffs`||1о$B C¶3i2EnVӡt \*)I6 ; ]CIB N{+zG/WRl4+K:L/&7>fkɒ휶GOp%6͕Kc$R/$v-4QBsZk/5omŤZ󻭨BfXA!8P_|=o~NB5 \j7<w</FymUی ^񏈴_%煭t뙑um~Gtb,M`A 8߇ u ~ϺOJ{I5k c4jn<f\Ǹ(;W=dg6%7LI]so4-4ph.de9SSWw =5?rZ !ik2,SxI}0p$._sáCsN ;]ѵ+[&DOG|FuڰET _u]m‹E%.NcpyW|f|[OقW|Hڶo=>'wx3LHs(b#L0E4kV6m[ᶑ&klRSBgC iRqeo$ng.R <R“ڈc/x[P^ Դ *)%,!YhUfHRq؞+~~Ɠ"/FuRk8=r^\\C"y BUrAKM{ڝgix 9%֚]%O@kJ+Zg')k-?&&Ȋ̆%ب@P q5D (aEPEPEPEPEPEPYZ'!Fk+Dd??կ]կ/(_uѲu`QIHFJ1 t~#|o>Rt 4&6 F#V:CK R^J1\ڽ$g~')F\t[N-SԿ&O;KS1X:ήcwYdG@' .5%D&J6wwiz^91xN/PJeMɿE~ݬyǝ>_'ׅm{<=DvL-r rҴ3O,Q*։Z}4kxeg۰&Y+c(b2UsFgz2V]m= ڝY|[ݔ[niGԔQE|YEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP^/E5gӿ`g^YzJix#Ү$a b0}e]~Ck~rίC+&'F< :la5+fZZ|ij?BU@!5ßKr 9kARr|/73\})x 5*݇־5q&W) 5w׍U%U+r=s^2\A5G s|Tʏ#O5SNPk~o][)c-*F>gOsaʏ&^M :+/Dk;%:(}Hx/MS¬[;"pI>T1k-c_ amu]Pp]$p,ʈ:'QCo"RKz˸F8ny~kNnkIYgu~"LOձ<ʞծcQo)kڿi k+}[ۍ>E)d*vފ _tY.-gF)kN3OnF*uΦ)Fr.mv|/qæh]VNԡD\J Gm!Or+bE{hm'4ƽ-&[+}_B4)b&$\ݴ + ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (<ë_ Km 7 o^ ~ξ) ~^WRK4banv?֞V" Gum#(Q'?fO|aSgjVYѤu#E$HH7.ÖP ZF:֊(!] G.΀Mʳۤ!,X}#|q>=xS񅶛Lh5}3~T>р"eAєPX?fS[{\A.v$$3KsoJ> v5E#/ xsĚL,O8ﭛR+pK"JapLk)GkX,QEf0((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛU8 Cs dV'TmW_O<^Y~&y%C*ďቇf&j4LF/!‡vnKIj7.ml{38XCM]ln q<dJT@f$A>m b q I! {R>ۀ*Ovc)7.$|^ӭjxi:L}?=sHcDW_o;Ʋ}Х2b*CS]h#_|cN.~Ǩ ͗]JO ψŏ{ ]LgyK@OJ}+<{cc4}+.UGE3(VV{,ë_ Ow5fIl)RBĨ\>2|%'|_YyD6-G+ve#sҺۏC>W]/~+ŰGj6bJ%$kjT"+ ?F>"/:42z۽YdVRD,܃˒NHgObLWƛn6/Eڒrǒ06_!G +|b׭]n fӬgiVo#6X12wc`߶- ^`8ӹ5s [`гq5 ZeY_Z[*#r3)  O|ş؍7Fd yn(S3,:e-,;XW~ց1htXSz>C ~_3*mouHGwq^A|%վ+xcNאi!uT.G\S"c0u3tr\YAXRK3)`W{v%r߮˩?L0YEtYYwpNo7(yLJp GmyizU߀+n-gYoB`16\*$'%AGxw-#[sG|g1-i}ωV6[%crTn>YWiO<;x.ogm-yZjWRcD_d–~v t81~yқ/Չ-lutQE`hQEQEQEQEQEQEQEQEQEQEQEQEQENJ'ݬk·>c^t?tD? f= ?V`4AER((wSpПlV!e9"pO? 
R|f?{բ˿֢fOGW>]p5h|j?{٫EeeQOE.Z+++.Z|j-vjY__weQh V˿֣+.ZG]VWW>]_wZ=բ˿֢fOGW>]p5h|j?{٫EeeQOE.Z+++.Z|j-vjY__weQh V˿֣+.ZG]VWW>]_wZ=բ˿֢fOGW>]p5h|j?{٫EeeQOE.Z+++.Z[֢d෋ʏO4r{Ogo[XQyOLb.zCj@QE@Š((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(+`x@W?CZ|HRٛQE@Š(((((,Ҿ |1iuD {ebIw9O*HtQM QH((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEࢊ+(+`x@W?CZ|HRٛQE@Š(((()7.#8@ EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+(+G,QOdY㞆]bjx Cs dV'i38XW[;2CmrO;$nU$ kG/|Pj jʷxvEWpppN>o|UuO FU;qYP|ף $|:5zQ]<#,F"*˸tA}1 i1d1Iq7IbV7٭N')élgEWht(<85쯡>4$x-Gګ](a># E(⾐о '>$xr)SisijR; %9(` I+N&ާM࿈gy}y%eeaYÐ|AmKݕE]^Zy2l6D±<|q?H?\@ N髵/麟=KIC6sX+krf3 d>OXaYEjw^6Z`Y3G(Y| lg~~ ƥxzoy(ee0Ą|,_#WDqU_ 8U8U86a]vrQ~OwIs\|>]0,_/?fzAu?JKmgŧ~_Z zx&=:,mNbÜc jjV# .߹}o /v^\o4'J%$S?<#UYv12$|RH$ +h?Nӏp#tgQ?-_dVRZw (aEPEPEPEPEPEPEPEPEPEPEPEPMnkt }Ƽjٓ5C{POAڶklsDQE ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+((*奵712**GV(t_7ֻѾNgil{s'P/ ~>"um_v𵶝o2Z8!dp.břGVdw>k6:mζ95=Ps.'!Jb+]'Ï?So+[⁠t%L0#|Dk#̀8mjm:T||; 0WIշv,X{Q,ŀʔ,NӴu DCⶽ[ aeio`d;@"7Y:T|i? _-Gַė{%y bPG .⠆vRT;7ǾCi oM:KBb4Z6X¹~Uʏ__g{}~ᵭrS6o#P# d{Ѩ|-hm3wcHMy>f&c_,dz =xz;c_+Z|^K:-նH G&T!H+7a||E [[JVV!H!ͺuP{y|9>5s|$aKo$r*fjz]{5ռ]s%&V OPFvA_YTPAƐ ,qDEUU*Z˙iYX(Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE(((o_<=x\[=;{hYM~SƟOϊZVi$o/,ֶA$g+, <zЛ?\hC|Ǿ"Ҽ9o{8U&\m3g'hsPs/s/W:'S7qyczRWO^vh-gҭu mr'o+ (Esc&~χu}z# m:S1g4;q(?>(?9Úٳl[iK:Ht5s^ 7(/,E Xe jL[d*>].+`^_O>ji>>=}6Җok9V\6{Z6AmBx/-fI刌IPe#"4hZ҈"$YG̓ |U[xާm{a:\L#trYdMk%_Sφ?Z+_K{{tTm N,Qc݀}fo\۫mVۜ8oc);E.}$mIuf& QKI+h69;H<A<PGuk"M ȲG$leYXdG K>xvŶ4#o- xod{?KSs M]e@WWn{MUmgA My7­^;[{4[ZeYu 4)o7V}RGk$0d5߇7 -tk IJܛ V%n48+ÿ߇x(|I:2HqV'S叛x>h^WXN јoSkݳE< [ y~G2ZmG]7mV6fv7qXڗǟ:1WUmó]Y&խQnVɘf#ĩ `z?xs ^`Wƚ=_4HkU$Khmc11fpiW?J(c,.?WĭQu kPTP(mUP J O9'|/A| 7,X%o<弴G&L8-;.zW=h ƫi=׷.9a*XQ1ri œ#SP"[,k((,If $oA7Ocڽ;F_Ȯ)X`$c _>< 7CU[ vaotG`ks!|֐ǥxcᏉ {HEJRrRv3Pq3*M/:+䟂+ς#j\؝kÞ",SH#hXsb_R[WRmM5OgtgK$M=hŠ(#(((((((((((((((((((((((((((((((lٻNм-)H>(DA7m6wmko~`ǍqW:Y>YUE+o7o᷉uѨ[Ž?,5C8@DD w־g_|B[ɩ{DmHkx(UrM¹W~??jkbK!?~=>Z]'̖ΔygE-Ʌ 6UH*G˓]KM;1qzď~KEnd` om#3ԒJAxGƍ<ޟ>1տӤ|WD4)C6ʭ/pr<|_wOV9h:]@4" ! AG`2:u ^M6U{sJRqnZW~wRhA4-3~Xe/;;~v}Ewm&vMTᔐpAuaiz7 7!,60ݣBZDJ([~*|sZ@qe}muI8V8>ř_V^ ZQ2W&O42/i)YIY2Qv_4kѦj(s ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPw7O SǷ(ŌXǪTk/?xF}'? xA~zεEak!dD4e\s_??dߌ^!QD|Omᡧqwi] ܾC>i_xFse+mn!&arpk%?+/?hK&_Z\;Z $@W'X75ۿ/OxQ֢7#b#7 Ojj_?e/? 
5 5#i93}LL"?*ku&cF|mĞ eNI$ %ܛ2cYV*z&Zj{owq iN6ɠ}Hh|v#v(7e.| >>K[et,漄;I|[>m-OXC c.bcl)V_N-c>?u5߉>fjɸ*V9{Tآb+LT`PL-9k(w}q GIdP8;Eb"!E,E@'}w7^H>&d}f^f'򟧿 iڏoWǥa}6c>3,W~iV?C|JJ_T$24m"0G1h²2345l%T ~ğz޿5եڽܑl-.,pdf* 7zxt+t][KkbY+snɇwhɏ{d\Oz?٭R>!MR"(bc9U<-(\&pH4_x Hm;Z)3E̘\ "v?ञеٓ$񆮚%&ky4LLsǭ{Yt[W҇l 2[t!Q*JN7ԿdOr῁qc/bӍon~ӧ5gF%a|V>d|8/ؾ?4KMJoF<Җ 3(&귗:dt< iuꤌ$W iCY/fOgX׵ b%f- {|ҽ ߎ7_ԧ<mmX=NI|4s~`9U!PjTGcq xWĶ>!K%%~d7bsEƷǯ~Amn:߆#4hKK{h"(p̹^}oz|A>,j,$Ѵ˻g9Qgވ)|uOj߲?k}I7|k@ۏ?;|qr:]B#߆~Yah'3=;̖;U Sc?|׃|Z%G's@pn"`D %\~\_A 7?J~gо_~%_(xoo\>IdW/#$%nfeRQ =Bz W׈k4ڲͬhhO=`bbw#Z_|73xcUtZz5jDr\ pD]@gv,K}9Yɦ 'm ?m*>.5rGt"k@؈x)⾶~*| 'v? ]ZXgX߻.fP\ɴ  *j˗+7iדz#)ԌbGM4殟ZOx➷}d/t%CYWS[ihfOi[iYxfm*[ˎ!$B]*-g7ן<=3ƿ" \i+$xH @MG.|KOO7×_.ԮܴSj21eR!a剜cJqݗ,ڲ%W~0RSMZ1J1PIumݗ 4CDi-W%4-6ɞhLˁE|}jIT{T8*kdQEfhQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEEO\+8/o7_|7lroٿv˜c#ß؛g⿁6Z?Zg<0E]i\ȓ+{I?)mENMN}>clHF~4$[ ٯn>?SoL9~?hjII:_36o}}|zVEG߈ng|~s_zd‡* Efn+ Ow?|Uk.d:As#AEV_m/ɪZLӼ4ȥM/UømJָY55e#7|mf5=Ɲwu4t컯qo3)fRK1coOڟ,} i-Ba$Y)CXm~i ??"Ϲ>ſs4|:|m݌9JCſOMj⾽#Oŭgu$iu7LI<^S YٝbPƒ;l||8֋M~ }Ov~C3)y%Hv%N8|3 zg/I|O} Mv &''l}[';)埉G_Km1Ė?[v? .=2`v 00Aȯk/ٿÞ2O~7Ŀx{G x7Dʲ6P-^@.K{uCڿ{kTepe[0@ىU'#l#c>3JƯ9y-J]-J WysF@̮;3Xv_!/f/hF{2w1u,w-T YfAa*BA9bŘw7 %z+>O?ϸ>O6;nyg);G_UK;r:...oo[^=%wp~UTTv|im W< 4[y>{X€ܶc^>(|G~o~ZZ(ͻ>LuUn1U}gOٗg)~xFinIMV@Dټ;Qp(řG_HS|O-&^CNՙ3u i$į N?5߲?]u7CfOe?ڟI?('~/ζ򼿴\n3F2|O? _ik>qlhsW`oi_ %~{hI:9L5;Nv˕n>|NxgKG/j~W治mo%h@-$R#_z\P?cO'a*S}]4z`;'(<-6upa׀=(jZG%|~>hhVֆYab(RUʾ6Ь~x_WWj:Ɨe.jKAxO(JSW:5 !sW6\E-hQ'YV^s% 3iolK3Q.-nx'A)ᔐGi}[Ok;z;/p0׷<\oc?Uu LJ6үl-0wLaI. P xo+(k~/KZQ4:IihJ b;~:IY8Ut|dC^>IpZ}íhk1,Ё8um0 sx>/G c꺤3D,f2ơhMcO˧Vx*qqOܓw'mra ΊӒ.R\g6Wa\hZMK96Wd5q$B ",E#,ʶ q?h~ k?ڸu}'N Qwen,@DwNgahձ}ܑs0C7V0N7hpJ?|\>OĺK/-ţ$3\fdQع " j/w_Pq-ka_-]JOn#庒"GK[\VgI[%F czrU-pAcoOڟ,} i-Ba$Y)CXm}OC~-˝zĶgygʓ2Kt'q;dD2xq_Na} _o}/m*Lo yVv\I9\}PS_\~ ndX[#TD[d%OJ~!cN!տ^WٯjSퟁuKcD{{_ Ω,; 4L팊mŦ_ 5E~~*Kdq_Kmy;ps?U<~Jia%(Vw X^Q7wqd|Ilv*g@FajCrr>C6pס|{~'xÚO~cmZkrͮjk]Ÿ0ys7|ռsyx֥NϼOo&|;@Š1+2|> xĺׅ/>0j Q[RCiLXL6H94#GFJRӄySQn<=]G9ahVڒS4RjjVZigυrZExoZ]Ԧ&obGwyX4>f'oK4ٯv&i4.rϑwq,@Ho79y|m/2Ѯ>3WI-dG5Ym6-a`x,AƐ ,qDEUU+¼=YUgk)%v|ͻ=4zpnowfI]kEWϞQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEP[+TPQ*+ޣp85j(6WRE5ռSIȊ9RFA=*PEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEP]E(2 2=A ^E1ǒvD'U(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+(P_>;/-7ϋI9Şn%b%$6^>oUgXid?&yNQU[}Ey~=|_~O|]h m!EgUn/xe-sNg1eEm(J 229Q^Oώ ;Ǿ.{ҋ''X› {5xG_!_tkN$\Y\$UDNc!yq`wW bၞ$>!o-Vgo #cS u}w h6Vwby"="~_1rG7z+G%YZZ:^H#ǏsZx&Eϊ4/{)ViZK(wАM+0=*>މa ^#0.VIn~(e$`W£|CFrH`"D AdG"<}G#hghx x\f=+7χ,<?^ݕg63 0ʉH sH(((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQ\/WŭxULɇ)$K&2BG$FEe_J7Vbm/VμG *$&$隷᳿g?m# 9' o&eBG ȟRQ_-g~3G;sO=FMAt8./?MODo;sO=߳ Z2o !wЧj'ԔW߳ gGѓ]?? 8S>[gG6wC<-z?֌q0]_)%6wC<-z?᳿g?m#dOB2N#O(᳿g?m# 9' o&?q q&"}IE| 9' oly?[}7aS5?J+ly?[}g~3Ghɿ.eBG ȟRQ_-g~3G;sO=FMAt8./?MODo;sO=߳ Z2o !wЧj'ԔW߳ gGѓ]?? 
8S>[gG6wC<-z?֌q0]_)%~ٟ% d ҚfZuGwe{ OoQ\'}>?p騮gGhz?h{DtW3FO=4{=:j+DG'}=5ѣGh~?F`GMEs?hz?hDG}#O=4}ѣ>Q\'}>?p騮gGhz?h{DtW3FO=4{=:j+DG'}=5ѣGh~?F`GMEs?hz?hDG}#O=4}ѣ>Q\'}>?p騮gGhz?h{DtW3FO=4{=:j+DG'}=5ѣGh~?F`GMEs?hz?hDG}#O=4}ѣ>Q\'}>?p騮gGhz?h{DtW3FO=4{=:j+DG'}=5ѣGh~?F`GMEs?hz?hDG}#O=4}ѣ>Q\'}>?p騮gGhz?h{DtW3F,ٚ$j'O\jW-QEAEPEPEPEPࢊ+)-#rCwR{5It0c#͞?j߰xY<1eYCifXʹGa׭x;a'_jk}RGmי$R;0Ŵ#zWյwQN* zqZJGqWľ gS㏎~ /]B a) dKFUX0EP_6- ޑ_xA~խlA%ͱBV?b]CKYb)Zc/FfB2sjc?U?sԫKſox4 ihUQ>ͰDYwM/pxWŧM庺~foiV- ݷ[dlo6ʱ'gSv׬Qlm–h_sk | ^Ou(.$D+e#dS22̠ / j|J?SMdFd2 7??:#>h_¤b>#h_ÏZ^C O%Xg} "y-e&/#eq~~[||O^Z/"ڤ}kfHRWb*%Uk0m6)~V9?h?hzn a J1mH$"U4xrVsh(Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@5SҀ+Ik=fOXם@?Cj٬o"ճX=QE((((((((((((((((((+; 2Oټ;o~og=|Fwnq?n9˿/yoo}+Rk6N<Dyǽz;?P?./]5/.x "e|Ej4n)}_ywf~Jjt%wMdm;?P?.w5]~Q^C1")g;?P?.w5]~QGCxCGk_@?ֿ?IuaExwiA' V~gù~sZ' %9ɧLg1tYOKkC(_Q&$1")g;?P?.w5]~QGCxCGk_@?ֿ?IuaExwiA' V~gù~sZ' %9ɧLg1tYOKkC(_Q&$1")g;?P?.w5]~QGCxCGk_@?ֿ?IuaExwiA' V~gù~sZ' %9ɧLg1tY5E.hkmRK#Z_/cہ,93~'? BjK #??fo ZM_- .*pbݟc L3ZV+%{*hlQE~Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@Q@fk  ӬdOЅT>$E7OΏ1??:|m!X'>V,Bz2+ӣ#D ?G*L?G*"ZO[!4\CFb?sj(Nj\hrjR7Rږ/S6?OO@T=ѽ??:̹\߇*[O9aۿA??:}jz2>abx~u2j 'VYNzw5":U5We<_0m2.^U^2. kvj[Ca,} 3kzʥ?jSZ)[tTyoPAt})i˴r:ROγ֨ي jj-C>OV+7'ڴ3<ߑʗ\+3*CYy g`խJļ?Ļǧq*%Za?!Up} Z^ۓ=SBLzM1??:Uo1Sҵ\HjU6?OW,[Kn{ڊobx~uffe_}>kCQt.a?:XZ}GQz{M_ wNe@74vuS<ؿ͋̋3Uj=”繬͏Vش6*E0 y ZȪ|1iƷ32s~ʡX'oj˓G3ry򞣵t5Bd%R5Fzn%E;kzʍ*س|3 /MnxlExЍhv _1QREPEPEPࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPOQ٠|xN5H_쨠g|3n8.x,?> g ٍ9C[jIMmMsΟ(ko5 Z#SaA[]iwƆ8D!XeTC^4_س~k^*uWT6ۙbGpB)2ۙH2S]}Oß:>x_LOW/ywoֆ.!IUl-aI 8W{ X-(_om7w=imc\,P!_2+F [o|i|C3Cqi ->?cu.+B$ѼH~P2 5K޳a͡oo_>:΍~avu p:66\z`oǯ__ Ǣ\@2kPS(&IaUo'>:X N{9\:ZFo4T)9ۉ~"xß|cWz"S%HT[%hiJs;^}+׎?nSa־t{+a)Y[hVK;LvÐQ֑? ~F +FUM{OjXMo"Ki$Ch ?6eI~_ >/?6 OAx"NtM̖"ž[$IiHf5>xߵ[ZXڥbdYP1XՄԆbJ]AJW"~؟D!/W4}>4 0G]O38'տW|Hj>2t6J6nce``!Tޤ#S JJ~Q_5-[ZωtiT'R#R(;\H2`mB?i_OxOEzx4#x&2 *lePY[:M+|m #Y'%֭~-]:?1 +-<ۅk?h?7'o~Ϻ^5[Yb y+ԐZlfP.3eF`./m_\~ xn,+D[ͻww࢟kT>p~٥XG. 
Zn\P2H[j\H&]GH\O8IduHAMs+CN 3K_?|q||+~)Mkwqoi9LJ4[a֏d¿g;{sN(97dm'D4zQY(((+7 3XVue_Q򳯉IO'.fe&uzo 3w_rU*$Dp 3`dB릥ƽ*_&U47g R{)ᄦ(BZ 8b[TENVтv^oD~Yg[¤ORʬߒ՛_ߒ_ {atT.̡Vp@ _w_C_oxD0^Z QZ%[ FV+{HEuI;=%fqeMZQo$emz6ֱJR5?w㶳^:CJI.Ej$pZ[ v3 `|zo>{ѬOi[}G՘4'~t-j٢|qi⸪"jД-:j6qirMv^w;iqN*35J.RM_]M+xQMwXѝUPI'k3EڧZ?b(vrYJ D <9FKlx+,(mѶFdu,9ߊxWͣiz^qSne(c,rV\]NZ#k'd֗Z<4):tg)R-2Uwv?S>=,kV׶/wj"3kF+8峼moǟz!G@=OS~戈%$ȘA ǠT>ĺN=5{=vqUT?geη[-o{Xǝ;'|+&}F__fem- 7cZnǣaGk zG^LsyhC2JFK>c}ߴoRu}gIbӥ?r]Ou0%b\r>~VJXJ*MuYCNxrQF˙9E_gw[WjٮM-[Oˁ`#fűՊ& |4} $MUsm.QF4舯$*|u#V5[JJ'yss$Ֆm=uzpJTn2mI;J~ii'Wc+ʄ\pI_xK#?[|I.XgX oʺ.p& S]W_e |\k\~\"Y-,8+¿xyҜ+ҜNIsZI]jqR"a:a{5v掮.FJ^{"KiH/h؎ !;N8ȫ[|U>\xRWCuj5AdR)O+pǚ~3iO \zmƧl#UDŽe& 瑆牧F:J|mI'mzu7}<5J樨r'k%}7It}x +yOܵbbYHfS<a#Vcp8uo JohrZY2Ρъ R8 _$f#Ε˥hSze޾5E]۳|߲oῇ:FO\xKִ=.m/KM*#-0me\^jοd:(ne{{[E8W;y|)^Դ߽_[oMO:Ksf ?G_ ._v~A|`КK,+l?|A7S(~!ßx˛E/?UꢿXJUjF7m/SV4ʤI>pex/,ѵx;M3s# Y_T ~~ڞWa'ʭz& $sxxRxݒl>]EƻMFg[۾HF,Hv} _ǽO|PWeǟmb#H^ #-a{Wip<)Z>C| 0phUi*N+.kk}ǺQ__|g_~x~MpVHY|,OWbGJi_gۡY|mY\[oJ+#LS|b7R#uyxNrJje{JEHFI: i-_*v_qoE~w~՟|Ig_φzލ⸑#`ݤ0H@#qG~Z_n3~NgjDžmۨZ 9(^2IR7 N")QJW4]>vDbx+T:-dWӦME~j|G+쥢pqk:-_Èn$`)G`IO|]W??o*4҄oPo̍ߘZ|' 9QkfɧtCxqf J5U?OUf>?qƃ߇5:֫Wݟ"]2d%rG;߁pt)wR,2s 5VpJ|)iCѩQib9T{뭭nǍ;.:6!a1cΧwb|C:y7Xf]GNT"G;5oğ>k$ukZ7Ror q*y+_OpyB)j-v֌ۊ10˫ԅѫ.di-Ӓt] ++|-@|LJ +]ԢR U2F#kq߇^^>b[:YrAx~Z#Oy_sq 4u%l?Ŷꖚ}l}uE|!N|r?J;[ũo Ƴs`@w|(|.x'𾥠?m)7u;9>sieHZ"]8ZrZ y}l˙K F8r[h/>T}}~$K?lYɸj~+ҫw*M+Yb4' ΑB:?ߍZw1W'P\B7`]5b:Jի6uD.һꮎj4hr퐅I$Cx.n6stoΫjvhn6yf_=k C2'TN-ߖQiaxn=N+RJѕuim :(\p(((((((((((((+xyc5.*$MI\Oį ]xϊ|cq[{E4g)/-r@$.4 >CoχWWox1St:@sȊU~D|6kO]i/PAf͝ljΥ_id0 ~~?֏'2G^n|VO>e34߱^*2*}GR\OxMO-dD5&M,x$l8%ДqŞ!75U<*GfCjn j3c__׀>=ӟ,4X>7/΋k({&m~P1{ھOğ߳m?f&% s R7H`DuxǞtrHlo5YR5\d]|_d0`J~r| 5Y>PK}-o呂!#b^A◳cME|y鿵> !jG&v}a0$`HVTUWۊ0]|X}?Υokϥ[ؕKĊm P w(_f tWWOx6O|.) 3x=æh,- e,D]ʪH$?>f?mٷᆩG[ #a]hJ "0(.~_C }Z%yoe-%7gـpx4*zEs3_Y|8k z<Ɵed'3JTiF s“?־(|6q ^xFRILMc4T^8в)*Â0O ,>46|ExKV.M I?vxt^it]k'^i i1ZZGmx)4Z4@#{IPf!?l㏅|mxONd4nč0n扚Iwk1哅^.}|>^^]d 3ұ)"c,m_Oi:ͤ]C<ͷy1'kcϟϏa'.A&qM,G4i25HRUm$0OZKG7D¯XMQG&"<񈼢p6~裗MGsj+r?ݗ8c|/Nx{NA$gNZ1+~0|_f_@xWqGp,mcd(WNʠ GtWo~" ,+_k IieskFQDR"*Y@C'd+|qOٷ{.MG!;ΑQ$e|{;EFf=\h ]V?3 ]}cʗ۟¾ſ?>Fc6O麎;[U+M$ѱ g%If?utFf]6{NBv5nw#oꖛ~}k2ekoNS`]CI/S__igy/nY[E,F903L9Sib#(i菅0xB6^_V}S㖱rT< xAtb%Qn n}lmD0bۙ'K? ~*j|L5C?Z*Er/bV*J*JUUV֒krQVY-sX-s^l&`;m#A#ZU:/kJLAETQEQEQEࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEP|+xqwtᙿljw2rA~x} h#_m>_~ݷ3W >1o,?lG&M>,dq<6-6)"XZ;vӯ)IT8jC_|-ޝj]͜KXtF5|4qvׅtKMd{-.N٣VD0ۼf(ʣU @ӗKɴ=6O)lKZDldbܯp0p1UI%`hl)}߈Jm5hىE3gU.y DxOK?^7%mf塜ɕxeڲ#)V`?V> =χᘴ[1Z'#i#1mrƓ𞕤XYieQY,R] 0Xv9VMYt <<[M;q-bISfx$~g??:.u=杦[ZK5vWe8$yk_ x|8סt{8v!4߷smݜd㩥V>S 8gş >.i7 > %'<綖0"n%+~?k~68 x[ovsI`bR+5.)yBxa_Q\񗂴kQAӭ睂 G`ĊmE>KXm,Kx"BGUPI)N/[j 4~kS~^Iw\h'młGQWq~O߳_{]&(>.ݚI!CBTn;^G3࿌5V|S_ڔK.iȃ-'щzFht>cm[.-m"H s$hNr8?ݯo7;k%;GQPԴs*>1T|n }.?Igžyk +gS~Cc&iK($q#N>_!(С.&~߁5?i<geɗwioß^_xCº&s.˹:KlJƥI8by4Y4Дu?&@i|M=BP??h}t[q8q/E\wl0O223|DŽ|< ]?|/qb:}EgFnmu5hz/4_it'. }9+0t _3~.ֵYBMgy pBiM+j)];mJM'P{!bEpʈǖU|OQ_+ǃ|hvۻ-: 0BʨA+gş xKi|umofmVӭfxܠb8p3҈1i 6|k$`Ι޽S V}^7?W=}O [zYaiβ=J+rX2n^oCд? VM'MRv0%*Ib(¢Ĝ9$s5hQ@Q@Q@~fFοL7 3XVu>#;}eL>/]5/. 
43ڏ–/ZhhZ1|AKY.~T`릥*4RtpUA=A6yv _.c(A&׹Ls(ͨsrbj2_fQ&ӱ=Cᓭw]tc:1S y} \¿_?~ |B 6+cR)YFDiJ~C~u1o0N]a&sb><_c_70r M5'Wtdv?1昊٥t\ӊ2rwez߲'i~|\/x.s,rL6<2<AKWUCA즲֡ ^.p#dVp2H50-𞍬^ fK;HM4/HBCom h:*"=w` Ml%F!Zm8ir'5p6Q֌Zi8Ǚ}s?H߄.3[^[MIͳ$,``䌊tYҟO&ǂ,Ϩ4M8ϐ>U̚eHV`1K:WM߄UuM}n-5)D5HFx|3 .Ѭ4[Fm v&!6aZPNrJ׽}| j(ьyZ6[o{i~~3__CC36ħj W3^ xDWw \MkzHfHwYGҾ=#_o|%aK-F(`.Q73tnMDӼ3i#vF?FU~LJ ҷ3MI8f[odwg F> Joʚq^IFrmZ+]݋׶{g=fF$D2[h]GsUW_+/Zv ^$-,b 8iqV8|E&:1+4'%(}n)KUZUctڴ%?+?i,~KBm!Cqw=MFERXqcqƲy[s{粎!.q'CG= biu]KqّX`+?c6񖟢M҄7kh_}VF$^{YFԭN:n4Q)+_Mez.o:5kNJyZWm<|LX>|.ĚNJ.Wʷ 12L#Rey#T2Ix߂^=ɵU. Gp6?ah`s~h kX>!M3S;4!n]z $&U)$r(duaY 9u')JJm7'8t^O3ׯxw^qbQP:vmj?=<~׿ дF6նRǺ2(}xž~-5$NڍWp *Z'0KJ^=rm&VI$~g cWV>tySQIm?<k ?_MyfזpoG(=/}2]ĺmNTi}\QVF0G5R:p?y9?;ϟO[*N8+Sw 6S{iXD<>˪DP=7K`kd^_zռ1i֚HEm1dWҖZ^i}}+o 1,pG .Ս#PP/@b |<.#+D\˧jrI3g03EY5%nK=0#RST9&۴u_ GWݟ7_-?&:Ksf e?f/{cxD7? .gqc>:W1o}kfVq^UT-Q>hT5ρ/5DIh’5i vy+?kc|8Tׯnjmc.PIYb}x;ƶe N=B+F=V} A_xKum+NmLJU1u @#-VxbMb`@Uq_1˿<}m{ǤYik[yo FPlo.Jfi <պMIRLzLgk<'`fkpAiE.1[ds_C22'B^nRMJ*VWڻ>{l>6}褔cdi;~'ς& sk-ƏݴKn-<y#I6g5i?~YxSZG_wxm#XXv$w+aV5F6 mNm 9G eaQ\Ov$ i3̷fi&L1gPH A#<8x2L;|I$mݬי͜p?<\0xm5ge{2t |$&k>%ֹme 1`Gzʒn?,`ω|#sk߲uk_Gҭi10b/,C 9j/ UڞiwBģ L 00+> :'i?6v00 0@P0vN-`hmW2siy bqiri o$g֞w{ {Ycw_qZK?g/o+2G:T}p$#nH\ U4wOI, 5K<2.sVO:{>ѬtK)%iO7vXAbqW ^X?kBMjN5&ݍᚘF)JkWRVin5/C*nZMΊk{O(YW~M ^@/x:tkRKMNvY\ZC;bPw n?znukm@syyzzK>!,M2]m㸶o,L"G:W:.udR3ju9QwѽO&TkS*rpے4H_&~}coVZD[ƱC Q8r]VT{:ʒK=>.d+ثEι|Au-o  2 # 3ǥ~h> ƇP6}w' +_4[ Aƾe.+:oi5ȭMk|=>pVUE)4{&ǎ<;U"zMF%Rf|4WlmfVⴷvE ,hOIѴ}tNBJi A'(Ҵs.,.%wMݶr~[#\\V*=js4cwdŠ(=(((((((((((((KǺ4?k׃ lt (.mcj⏁xc[Ik"̓f8kþ+ht1iۡnNUAּI۩6}O_|ݠ@O~%3GDqǦ[-ИLAa!$}kf^5)Tx 5[,|;xڽ[]Z n?e˙e)IRG5'?>?q_߆~լ|9-Kaӭ<:D,ˉmpF?w^>f4Z4}iѐ@VKpx֚ǀ~h>2~CɍiKc=};hwNt3XIu(08#Q[i}:-;Qbr@h<,Eٍx*yƏ);a˪gz$hox;- +*frEo)! ̠K2-'_3xR?]3 oFm:a+jcAB[(kE_ M5],TӭVdB&a.ўΛVgf߃kK߉t{ sNvֺwP^Ue;dP$?,1Fo)LּeVP5t%[eD؅w .e7[?_٣9J.m3Z ۥŤp:7DX`' >KX^\*E#oDivG~[[iwOa ~< ,}6O`)`j1^ '߱UN Piq}+=w '[[WVTwVI5|pjk ^7-qIbǒ\m=3Uᯇd\;Z>-ʄM2 7+Q y?/o[ص/[G֮R{n$ :.F%<{V\+ FT9ZGӻV4<>9TSOݗ$+inndՓ?7|FwhMsC1pFվ%^W+[!HݦRs72'5 r S^8x$4q!}FȌ@'ע$hơUUW1Rq(J4RI.r<UjT9N:43mѫy+|(((((((((((((((((((((((((+dS0$/EtUxE{eRQ ڋy(ڋy+Tkʉ5HNp,*sГk;_近~=hҲb=5ѦHؔsN)}"MuXm KXY^_(T%SyҢ4 uWJ+j;i 4!Q*]puO³L3TGPuCQPjUĊj_Ώ/OTh,)^jqpQ1UohߘG?$!TecD5h__>j" #_E?:аb~ccl1SEpVJc ?ſ1G$PrT{8dz4ZF$>[r?m QZӈY ԣPIQgYrKF_{|~TNshif"-nOK~G 3A?:7??΢#C^=I?Yًy+*f/ߘj * Erk4PY?lO>؟5EMY7_Ϳ1G_Ϳ1\PYcmG~߷'McE TFۓouxlqs*rZIE\"TR5M r#Υ1mԯj`)<% j/ߘʼⴂRܤj/ߘF}k'AW;i?cQO}m?'Mr^sySfMjzM.gkV|= {յ^d&fŠ((((ࢊ+((3o ~;v]2 irQ ɔP|C;9o 3@Uf}:+%eVAG 0QH(((((((((((((((((((()@yճ'ݬk·l7Cj٬(@QEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQEQE+:2?k_~'ߣhxF; k`v&D K@ |q[I7#sO2ы\ԧ+өSBtVn.E{; 8# 8koX^G?~E |w %G7_q/`𔰋-P|48Y]jkA۞NV:kC6?axm %G7G0iv\GB~?x9t?3?f>9G,/ͣ|r"ᄾ;sH#Or? 5ɟC6?axm %G7G0iЮ_A訇LuQ 3hȯa/?4?ᄾ;sH\r? xsED>g,/ͣ|rX^G?~E |w %G7G+Akß*!C?]axm ?++K4 ?na/?4?.#\^Q 3h?f>9___it |wq }_X^G?C6#OK4 ?nˈW/ ׇ?TC&~ ?(WW0i_it\GB~?9t?3?f>9G,/ͣ|r"ᄾ;sH#Or? 5ɟC6?axm %G7G0iЮ_A訇LuQ 3hȯa/?4?ᄾ;sH\r? 
xsED>ga}& Mjqi:(ǭ~A|`К4ΪF@H{FO+wd{ n,bdUQ*;w̪瘬t}۾3|5l2L8Pqi]-d_h\Q ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( zۻ89ZgÿTmYhv3gPAnM!!UCc=HPHV=.7vW\(bgNONx{^gxt跱Zyl{n0÷1I4QE ( ( ( ( ( ( ( ( ( ( ( ( ( (3s?|.+Cn_ʷD7SH$l HU*ƍIճF&WcoDWg7w _% @0,$O?5ÿ{x&/F,2)"eObʐOeUqtbݯt6zVYynaQURWvJ]Q^Q@Q@Q@Q@Q@_< <[ ^i324S,klDnpTt׾]l z4שFwq}-m jP+R] B(C(Sį,}|^ ahbuv29\AgpxJ]͌Q"brgT4.짹)d;f㌊\NM5tWcᗅoZ{k##F^:1V*G+W*/^_{_,M'Uԗ:W׶ۢ+`(:ex/,ѵx;M3s# Y_T ~~ڞWa'ʭz& $sxxRxݒ(nZT%~kyqfP?Tu=z+N(>((((((((((((< ?u xö_`U w+kN4%{k;;+u1Wak (KKYh~Q\oԴ t8ee`NC+F NnEi-{{Ԫ¤JnzMwAETQ_0|c>G޹~"$&@  , n+Wa .}8 u#On!>f6&%8P+nN=XW29̲|n_5 e7W=-yuu_0+=((((((((((((((((((((((((((((((((((((((((((((((((((((K_*/'f$+Z>GTgyh<^g#1cG_Kr6F#9>5/WLiki>SԿ[ tͿœVvgϴ£W-o(*SAs>?O['kK0/֥ٟ"  T5i _>Ygy~N5kɩ@AV֟q<5j?*$ՄtTeR&)?X^Rʰd`ySyru#EbHn*_7jQ#eկ*_7j 5WحE?˓ 1uSZTo=Z~To?#u4\Rq#SAU%oj2Q*ztiF)WנWϬ´4F=\Ϭ¶ GFJ0=Պq<4}y?k>Oi4Ȳe pEG@ kz0} %Ux>+\֚ St_[MjVve[|z{6= {յXi~ fLQE@(((ࢊ+( 4Q@|Hs| ֹs}O“ݶ-tdHǻꃲ?߱WZVƥ`$fcoqF3Aꊮgk~? ?h-kfLl!6\,09^ M?BӴxk.% Qsxn<$Mv-M)4?৶᭔`qq.I 4*3$j\m~ϚNJe mIl'%#/̑~Q&@8R ~QOV?9>!(X_6qVֶ $QFrI$O$I0= C~9եt=^LMI(y#j6<=sY*G_-ŝ당i'|T0+6~^[n s!K :hM$C# <(b0@z*T%?6ϡ?^]`dT*v*$$CGGPn|CEs=qV;澙9x#xvbGcYYHetuVV!5oZqyuc6`n-Hg9NRn% YO#j(MER((((((((((((((Ju5Pi>c^t?lk= m[5Aڶk (Q@Q@VW"}llr}M1{S JT;3\8Cuqm!uP2ϻ+GW㫜c7<ͬWl[\䋉*5=Ƀ+00ڱbʮfv~_{HQcS|nE4 gh lr{3d[$t_%_>8^|I׮5ٴ+WXĒE:sҽW%kC~̞+U4v_kp.+Q.@, '+MufdM'Cxyiu-gZe1C|cmv-XkyeNW͌( FefIUaoړ_v.,6mvpJܒ)$>Gg;kߴuG.-4Kn59c0[Cdl,Ĭ f '?> -Z_BSs↛}|Oak)'M1u O9$]yӏ.4^>nc>>3Z~3k6&_Ekur'PK2/v|=/^.A+/tֶlu ՊG$Ah,O؟omFE헇]fM* @['2ݣ!GxL/#mڛ~x\I&\ZB,{k>SCT%?ڋOQ|OIlB{fMY8.fhr?+Te+XK1G>%?R?ŸvJ.}kKgǶ^ m$EO=yH,]8"G 9+KŸio0ƍ)`qvpm9qN"Ѿ*Xyks [􏆶n"N,٬<1us-~d( COx/jiTt%KP_\ +FUc#v4 ( ( ( ( ( ( ( ( ( ( ץ[|W ʷ:Ww e<08Qq_Zh+go,_R,cp"bڳC;\r98"vΜej8ioe|Ty¤%5yi-l,M[I"0<hm(PJcW^EoANjKRE)5{jI,[Elm1%+g|2u}NPK,g[&?ՙ }so4=+W OWx[ElbX+#2"H;Kܾ|'y}um;cfFWar_:/oe5eיvn"N3AΰrXX{ѬOi[}G՘42dx$MO|NZ=E{iosw%d;#aq4Mq)ڲz#BZޡ.<${xْ9Q K yaS4jhkbC!=%M7u܅Ux34_|g|Uj|7>s+^@7oq'%02@ʪcY{lsٹ*:ZMI}v_-OW]Akqh mn _1_q^oa򡊮ubh*&3PID޶Oc\N"5J7M6NqMz{V%ݵeE3ġXaCnr7##gHχ(.V¾6mKKs-7WQEmkvAc*0ݧk45OzֲKdDL\n !fLV`Ɵ[~>?Q{eè4;NVNJ՟/}`#8j(JUxNNPmEr|_Uc_>*{3P~)]XZ4]\-o?~<{־ ":zC4DG!,1&DhVv=/[Ÿ/KW0_~&yQO$j.'{#Vv214Jb%Ug7+JJ6D5)RX,EyGL)s;(F.wNNm.p>SmaZ؋ɞxo-hf\Hcz/ӿ~2[_jN1,ZtKY$@+NA~?i,~KBm!Cqw=MFERXqcqƲy[s{粎!.qPխS]OcY8tImy4O W:o?{V^Ks?ggeZ&J]CCcd|~\^AZx>o|K&˪d(JtDWwNB>5X_ >&|, ^McV+[N&PD2$h~_ <]/dڏ*Z#FYCge@ܹ&%g4# sJ<FI|vUSa:M^S'+OM{Wgo ~|' rO?𓶗,X  MWErr+?O u 뚽R#Wk<g;A3{oQY׼WfmЛG!|{C b)b퍹0~~𗈣0p+OXd]B8$nd ?x *Ͱg[.Ehr$ed،񔲜DEPoϖwm~%WԮ#ZMEYS#)1渟oSpf{q1[mQ##doamɂ9j~&^qɴx_o\yejtaT`iN3"^kK'ˢ;sJ5+O4M:1i$.k4uVx oٿHkZ^{,~dwloz6#n#{7oΑxW KKJſGYW;Wڼ↛{=ج"i xvy"(I舥5쿲w/x?|/H\4MV$oo"uuUpZ6B2+0^<7<=8u;Fɫ]k5C/ q,1B^INvZ>+C'_n|%^*Fu x-xڱEZx U|7Ǿ5PhJSW8Tni?||*2 wĚ{/0}=-,4ˏ>F.YJAG[y?gߍ~ʚ#7:]fI< RBƭ#A.27EbX0=s|Aִo|{MPUey I+ TU,[\:Qm5~…|ڤtyNqy~1'$eKUˣm[z5 %ዽ獼WjEdJU2v tV~ͺś߈geŸBN?ɸ.U7#u,7Vmx/nm4 [Um(mF>H"U$eA_ga%υx_|pZ[ίIg2ۘ603]Y6 JUqRViɫF:sgX~,^"TKIN[;~n?gǟY{3ឧwG+$H,'i mlqexVۯھq{[p<օng,8hՋR-)^<6Ӳj8]*R2JSp3I5vo}|G+쥢pqk:-_Èn$`)G`IO|]W??o*4҄oPo̍ߘZ?e?3^_xKe đ[ &UF^ o]g.-|YͩI_@wJT.2 eEZPIC ZRoMijڿڴe Nka0ͷSF1K]+hټew?Zg|h> sQjmq-%!vBP)T|sۋkOґz!R3';N˞3_LGDJUQ;_~ |B^mw -#4z] ŏ [09˲1p EsZY9uߩj mxf/zw9|C'qy!ee{tA;$|۟MHx jV#?;}>3 #|DԶFA_ޝMXcI`fbi|y?0&sǗv^=_ßòY^xVSTÙ^ F ᣝ!'ʩ5Vyg^ $Τ"uSNi\|{ir'M~.-*$t)b[$ 2ݺƤر9z׉d8>1 WFCKiȸ1lA+7/o7:}.m2 +,I}O4~X#~ ]mk]Qn-@nLz2Wp ]*x¯#9❒m=s%:_WsjMI[^țşP|H4;ma\-<4&dSSki?j x Zg=叇X+,<-i"ryǿcmbSɘi-r5}j4)ƿ_n՝\Uz%nkZ;i8m㟍e$6)-{eXtQ(P64l718`>eM7 A[Mj nqu2̐08ڣ-.zxomzV(O+4M[ȋo%זHA$nW rG ~'W C, 
8]i{-aqԱ5qq2%R~imwcпf/#ľ"?o9+TVXXVeX=wj!{ݽclC1#3ł2x?^Onm8u-3LiF{pς0AFC6ᜆS?j?2IMs:A,rQurp9ޥJqajT#,D]?uEOJ7,ZkV*bi`f9)*ͥL ̨&>L&G,y|PZ xM*0UT(6SP'欄?C\xtXz9I4N?rVov\$|]~8xwMٛ-I !utÝ2=IE|\'x?dRx9VhbshxwR-+*Jq_j2rAH5C1S^Ч4/z_MٳSCԃ({$eQ'$l>_G_V+xzo- ƍD#*ʲl.]m`I&ߴ'j?|!}( YJI-# +@䂧?Ph_+귚J5˓al荸O"@H@Hg`#`і^j55Rjt˝%>Duk.ugE kQ刼%qN%+GM5+;m,:|T3|][#vTɽ9GOCOo? ~d 0XA8iՐ6,Fp ʱx6""VRF>ΕwetZfh,'^:8vnrc/f{]ٷ~5 CM|QIMR7[Rhfv`7>) ea?5|U~^MCJdUb°) + Ppk+ֳ<`;гmuȿm-_8O}oDIhZ:{CqmpHFOG5t~S஻KXڤw2gmRp^/F\\NژB- G乎 8 Cܫ-x RJz쟴v|<.s`slc)i&׳W< O#]A.jkfl7IE~?_ ~ |a+![{yP"AIL,O!BkjeNJ_ ml䢔ޙpM jQQrn+h(CIio,zԶ?J'g |uB CWzq\ZDK@s̫Q`}ͼWvN7E24n=U~O|?~.4\$_)S,nXu\3N|ZJpM%ꏆjf V3J1sMMnof}eh5|qC\mmKO,ʙwRE*xQ_9>"h?߂wZK v;ynwK=$CfskW<SkrEi+C)XaެQ7"eT$G|k=B30Jqzs^>J7=67OѢ{l<9 1Ķgi-yʹg?fK_-xlmnBKYAY'l]źF,}m7R2mSZg/,5z֗jFɭ6#E'ᰌ)V˩ՠoƴ#x6QIkQjROyJIM9sZi.Goܛ\vem> ?'ےK'ée\B^'U2ce_^(xNHoW]ƱHvHܼ 'n|8ךgt zެEZ(f2T0{a~_|AZLJ4VRMv`t_y%2Wnk{ENZQ~4ⓕokT,n_sqҢ4%dP*@\@^sL?O|ucgf00rZmjn~VxWbiS:MIZ>+C(((((((((((((((((((((((((((((((((((((((((((((((((wş1kZ:STnJ!#`އkzʺEȉQƸ96?};ts[Nއk}oZ}?~=ȹoCT[[_VúP >U{ʨ_|S9k{X}?O__ITpL Rғkzʻ|%ktx}?,UhC/ৎC&B*/*_7kOH"S 'XZG"?q#SoWsg"g"u_x]?/5nJre}=v_}?$]>?dV6=#};缷68}w}"TG{D};q[?tT?"'U&_"׷C)#~Q5G˓/ϯD>!zHiwGUKߍzxG;O'D}>H=M]0>Q9y5_O"@s}O^'5k5B? GƲMFY{iX~_=N =W7^',$CI6c_&[֊A"y}hYנ[/iIeiʼiE7MioD=OSLV>I$q?y?OAs}'U'C[5.޲UE7MK8 ǽ_ E7MUz\Ͻ ETj0<_As}'G OiͨN/K[m8=/>oWIo=:ˤF9eE\~k&sG\^+:iw-fpˎ?N5`$o~i?O"I#mE_^'^=(G?~"yuohWm&"Jsϫ'S[Z!"yd?_&bkW{HAEz :?{\IU{xTWk??E"ymF՚6@VO&N=n?hi~__&>>uEUby-9>XH?@9(u?m(UI? [?m}O%~M \IRk?^"y-uk?[OKNX{HYX~]"_&l#;IcMC:9Z+~k?g5 tO:g{#y~z>vghA];i`tnmja0c)6*o w7|Kg}?Jixe1DblȫOoo xSDUJ_Xi]Rg(eX  vQv'/Qw>~H..,#-YI%!G ntG<6 N,Ʒmḇ*os]E]>!=źx> 5}U {}\\|̒Fv6cвiM3xNkݻVN%9O܎:WmE`qZ__Z'|cWDZVmtxok)#rwNM_^"|AmS, FSӭ/mdʞHDܖM6֊.AahrvQ3޳;k.oxrK8&m&Ѥ X݌Yc!Xk(EPEPEPEPEPEPEPEPEPEPL8FU A4(ˢ!jLk0ézlI;@R[֭om7+wmfphAF_&Owz6z/olabe,@ "-7Ltkt" ;e k1計QJxӂ9mz! Ju!vW~ å|DѾ-EZTYY"TKcYT0gw>N Ef[la-rk,;fZ*nI-Gd|(Wqi^ͷ?4 Xkl"pR }Ѭ/ˇfTM/@4 Zw46:]6vяEkO1gR _Vy;vbj'7:-vWUW}XW⿇vп9on6\<`BeJ+ʥZ)sғOvgV:f[^ӗHeXEFOsF9{j^4.Vο[73oUi[s˒r9訧,EWܟIr+M;"sFaTkHDC)E+0$w5_~_b񞅧k@MB;)HꨧMh3kgwaK JQ%g׹PvOCX9Z}1oWevyvdV,$$ O⿀*iە)BZ$KKz|'_`þ4LKamt@Ww|c=뻞.4TȡՆ <Gj*՗=Y6sѥJfזpoG(=/}2]ĺmNTi}\QVF0G5Ef\vnvԷBW׾-/LӴtm> ]>8#jƑ `1\熾x wW敢\_.ey@9j(U(;K}w6'xޝ(B(c wmR-zW*{¾7xL|y[1yT=ɮb{?c{]}\Vԭ%`u),3 9VV0=w¿^_7P z-S֧Ni=n׸aԒ[6קc۟^3|'eY\3#G˽d5 O!- {tb1;C=_ <;ol4{HY&ic,V(UT q/[š_ik:N4zRFɑKvc[^?55ymsʞG3rW99lm͵,,,48t2;KtE*+G¿6MK*^C𞯬YxVtWM+X?Լ|sRv?-bŘ9-8m+ 'E%^qjO]; Цԓ^F L]+MYA.c@nf,p9$ɪ Z-GM;YRyPȐVW訡WT7{M8^9?x 5kwtysxac#ݴgz ~|1xwrre[[ȧ4q8B/k}<Sڸ.nWŠ(c(+W4eiݼm8ީ`SE]*%:m)B\*$GxG/\|/hNZ4f+ſl_~4/ jZFJ(.T.ڽ=; Zp\f66 @tU+))%H $ɬC~{ vԞtZEhˌoqf95QYrzjEizv (4 ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( k^ 5ڪͧ_\%>Ӝ`[~_S+Ey,L{q>S ,:x^M ;֭V_mݞ<8 Qn$E+8/W' 3ک}VAOJexgS?r&U?_${uz?_;%W[1[_ӟE)<=H.-K^o/)`;G NŤH_BgQ {?3ګoYҼcBgROL_TY[ ɟR?8oouK Un"=YcP맏ƛVPƔ\_Y[8ɒyk[]dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S4}EEOs?GmtQ.dT ?S54PE|RC{ (aEPEPEPEPࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPuJkLK ݼh_kppQ⾹mX{P7Xq?_z5˕]>q Xq?xTWxU<^Dh6{ysx?Edlj__𪧇`յ+Ks+wZ6gxC^]|%ִ~ȍ>SN_+r236o^߇^nԾ /dU?UX~1AX 3O\Rs;ʪ͐O<`kQEY$bgh[MKh¿nS~"osDL^}=? O! 
MD跰YoCwU5Z}rg܏(lA  * qW9_Z֬/ZkWWc_'ivkj u;;Dw[k7 ofi2nD|qɣv[C{> |.=i4"h -OS2 4H}߄d^A5NVH4.ґqh;wOFMz_tVLڦ+OB_{"C[0%`^2 C74w,_S^kq4oaW^u K_~OwׅMGo5d!ƅܖlykJBQE# ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEP_ (GW?ߵn`OȶG7Wo?/;y+OZ|!-߈A֝ȶ97cB '+/_ GL[Rkcm͋*' ;LkbAdbq}};|LO|% QKqb` ѱkkcdz|IC:tl$eq(7@os;%\>24i1rK4N9ٿ~[n#;|~񖓯Kc%`"(&k_p19=->MR ߳g}/؟YtWX9';uP/j^>4/h4:7Fl.{gvʅ幉A"H"&(. ີW Jlm3v`Ap-Ws=Ey`"4j;ydt(RWj@,((((((((((((++|\O ovۗQ%# $G,@L/zU{ LsioAqrS LQ#WGĶ2eGęl/ 9Fjͯocu!AJ'`XI>~3k!S.M_󒍏XeP E*REʞ k+'n-Hh&J#iRc>x[|q'RԥgM^ZK![%gI`:U1y,-Jk)9Ӿnc K7"UZ->fҺjۧOUϥo'CQ_?|>râ֨]Ya!?-C|#,ᰁ@쿲ƿľ%!d2y a@I6,UIC"pz8$JYƬ':VwvzJّ8*3*Irmc̕3kmgмu.]k3EHഷngbH?>2|)X\F0h%$x$L@ 'U:^N< ܶV{ʣ8ҩ.E6vͫ^ֹ'E|#7Dݏ%kW:Η'4y>2dx$MO|NZ=E{iosw%d;#aqU\E:ա)ZtNml嶛v=JTg>j>].澻$~V>;Of㯌O_;On~bP+]7D䲕f@UCxrxԪN擽J6Fwf02IAΤڷy4][}QkE|'/>^խ|Iwmho}jQL(V(XPۣm܍YRGO<O\6Ώfeh;\|Ǣ;G/h/fΓŧJ仺aJ}JN0#8T+QOwG*Nuqt ӂ2r_{Uf?F(0՟]k߈?i)u Zq FcpM{_MjiI.C.9"\(i_I9 T/1'A`FjN*I-vTz])ʔ$efړyy[NMџkW|w#w+ٯᵒ- NVG+0 7ƾ6}G$ ;i~]α{uߕt\,L,"/˾PιE)WiN5fk5ĸۍOIGcn'p%{mLA# NG^u"ړNooxj QQsN>xVƞ1k=&ij̑<̦y$F.p8Z(3ڵ724eC`rp@"?lH#~Gŝ+_JЧc#}Fkf|igqٿd?t{Ğ2hz\^-TF-Z?`ʹڼ/N՝~R>umWQqץ߆n] (q"vR/giտ{+5ޚ[E'օW/7>sYsqh?/ޏ({+2϶VRyǖk0E={U^u|v[{u>Wq8f*'N5c(i3eg/? ]6,-Ʒ\wMar1`v ҧ9V%UN2gnwjqWK V#Fs$IFօݖ;/7;jϏ>$/g=NFWq\HY On$ 8#ǃ?l/_?'S}Zmmy yܯ$pIѧ[^(+ߚ.h^"1<]NQН^HMǖܲWOY+w&5>#lR~8xU5AaN7y0Vv$'޾.yM+Z~jiB7{nr7Fga>E҆Ө5MoӺk8%RzƚAVNڟX^8AoÚNUu[o+n.ϙ X۲fOڣx~]x]:~) i9Ov\if?ٸv>g}4(˿ M=u}7E~n mxf/zw9|C'qy!ee{tA;$|۟MHx jV#?;}>3 #|DԶFA%µ*UK^yһO|#4ҩWBt"$k\>բ2_7'Ķ/4UХIl. ʻvbvP?>^%+?_Oi-[" rp )8ԜV*BsT{FOM'kjM;.. ӝ8:弢i6>+Y u/ ~ 4M]ʑ*#ȬJiVE8=O7VsX{NO#r'(0W+KYbsr7yi?u^]4G QϕM+GX7eniM证4o~7SLxLbf D@ڬѰ<ᗇk? 4Ho ۤ-n75=ˆ_2@pj  Zu63M-]:T'RU).UmnV+ً/9c!>_Uq,Yc$EgrAS WugV O^W }tVיqQ)ϓNe;7ml;j{~ך|!]}$nK"˖)̊~/_w|#Zθg3,F(24${A~v_o;~[i2% g2#޼wIѮ!׊<;i+LІ eX2p 8 ?m-_8O}oDIhZ:{CqmpHFOG5{,brZjMn,(&wdTf|4.RaZ[95-ҿ6?`? |PPޭ-W3|3*m9[cӕ:\E;9JM_[%fS N-5yWQRm%ӹ P_Śv˒HۜCȭ0O:a_k'k!ռ1nږt>Y#3&$vT)ꢾs~8|DaZwqP(>zH d2͌<צ aⲨ7:QWM;^^=O1q++WeSWjګ ֚?ۇƞ?FSQ0,{՝di@#6ӻY{ڲ\5/~( }: ,^^sg3yaf0\3XtTW̠n2{'tz&YQӯNt+sI)En՛jӳCz+7+hQiO>ܒ^>H -:'H .xOoCtG|7m-̐b5x@˲D|`vNSiS q[%'etў5Jp%8$wfgf}E~d|>~7-ׇgaA%Moڵy^i\@R H veW43KӮn#ATF$X( 8|+_Bok 9rEҽZۣzUC^e8{HA$z'.+叨 ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?;诽"dFŒ``z}*3\Fb#c#t簯Yz? x*{~5ԚxyD<]N>[_3|>O0^.ZOk:__QK _/~Q_>t~sSY_}ev??/J{R )PI~bWcQЏI?MIX?wX]}S?"ld/OD8>KxVgW֗c㚞z}kߵOQ _>O}'ڧ) ǫ~x=c?OD!CV|=AR c c]G5} nC;q>VՌOֵQᛷzڎv*+@?`4cN׵=|_ikEOmeKmE'tA3Cvإٛ$I$#kYK\Ms%E鿗^7q^#|6|+@ZO|\k^It@vE2|r# 4?h/&?W/lf{_;C_4_cM!|P{OK?hmCvŤW}?XOrQE# ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPW3LN}%KFv&F=iya$1^1{/+F?nO/yW?ٟi<}|'|s_~&`G<%jͯj7m^KndF.9\7yVʫI(ˮGDe4w:t!"EW+mxO*#a8=~Lg~|iͨiMytZc&F<9 ? kÞдj-6_9IxY{rcjO 4qS4)ssisLbG4g'zV/s ~>j?iskMB9(߆ $qT A#8$?hIt+ؚ/ZWΏ}؈Ůsad :FNmt?j x*2hO0~v&HӐGǩ?ZMaЬHYͫM.8YKj<wz}:0pK"3CB9UK.gGnMC)GhG j;((((((((((((((+4g|2u}NPK,g[&?ՙ }so4=+W OWx[ElbX+#2"H;Kܾ|'y}um;cfFWar_:/oe5eיvn"N3A;_/_ږm'z]{+n.e6GίrۺW4m؞+HVu,W|FVBfofզ',tqQs{)ǚ JMw43٦i4j'eeW`2|B# `2'?|n_{xwG4-X-նF2{dz7<%CRiwuŕVɹ|J۝\'V;i׆*Sja.fdo-\ T<-Fཛྷ&XqK֯WcE)bE}^?_egaiuMLFw5gh,1PJ#OxNoE t]9#0Ū5F"!Bf ;/VYxBӵ b&md$pHkK1)7Iэ)Y9)E̫p!JX=Tҫե-]Q_O~/? c4_i-h2*œ'dk^5[4}(te[=w) wH84.? 
k+O ӭ[/N̊œ_)Wu᷌n!H[Bﲲ7@q"+3jtiqJeiI^WMk/%s~)ѫZuUFW]ҽne_5gu$xwW]An1"6=ѕC~,Kna$umbePT8IU0 @8a8 XRP%ӓi4I$d8[zʚMݷvm]gyzm{&}}/ N_дI4vȌb9E0~Q1\4/%m5m:r[KBwG"6G9:ӄ ||=9RqMEEyZ&=O(iM&LJn]W"dX^'|R7>wԅέNm RF,~WWUWl,1";MF [xm!c8Qvix xk瀼wu}iZ%]>WƪH8k+Z%?oPu[;ʹ|{c[4UBn2R8E[3}쩨k>;sZ_Kyj˅$,j4#tV!Հ#~':kFq'>^+uYv]G4E\Ÿ wmR-zW*{¾7xL|y[1yT=ɯyMz2r=k9Z6-spmBhbpg+m{~gB]]sO٬EVܒTad+RFTj?\<]'5ig+Nđ]yj-c53{egZKaմRXfA$r!ꬬ`{+~/ J sia3 n]P ĸOֺxS hƕ{s&䝹[+^SOn^KMqih>˗G@FClv+q~>>\&GLh2[ʷFbu<'w̫qZ_?gNz5޳=fF/){ckτ 5CCZA3_K (#9Bbw xVqO-̠>zdIIOM*QC2fJPފ-^-5GSC8g_.q, Ibl e^n |E|"2xWŚڙ4tJbi<*c-G<[nrXE5Meqp50R/2ti?HĿ;xs:CM~a>>L/{⿇? >>7d27mo2KFuYw/ )5&o.Jfi <պMIRLzLgke)x'E9x >{14m ^-9ŧk$޶nqb20ui$%(%7vwKKod|=kM&xKLJ4<_Fa-<9&A߈>~?|;hZT{kۋv"Fތ;6Gj Ҿ #RĺNXʀ{ v:<=5vZń̇*Lr)*y KJJj*iɸ袬I+sb8gUԕ)R*j)Ky;nI#?ɍzoO &aطW5ULJ|?w ]sh:|X_n4hNҴ;=%#xV8V)3*㑌"y ]oi+5oS؆I8V+YRvw?,~="?Դ1>a ۆreO<q'5 g˺YcaGz(X .;Lp,QF( zڇ?6e5Zٴa'Ʉ㝅O5}ϊ1_~ZFi:%cl!a5T@GVv P] hv/󮿳bUܞ[&)Ɵ,epcMwճ/iaʧ4*S{.~GxʾkҾ|7O3.úgi,ϚB:: ~E 9|aGO]Qm"FF U-; x+6ZAӵȠb.kϖǂSV*H1]N5)(%(҅6.Y =8q|E͸j%(A/ɭ?<~iǽ;B5]_UQ0\ gDmyBC;gů7/"<i%Hhqjɩl!=3]FֺΠ.-">SYD21W6/ԕtO٧9JWm{l:+Rh6 ѨC? [S)_(z?~ѿ!x̃AfKH#g !ڀڲt6wig A{ػmQO&xh*4dYI[ B+"C?5[cp9\+3Jvilb"eJ/eu X9I)F[]4ݺMOۧVuZ_"N'k? V#m@vO~4Mw^&miŪ<%q\ 1@M}[oxƭn25qnu;/ "Lo}AY/φ>[CAҮPYky5 56Fr%b'-toT3zxN#Zg\bKsoأᏌO>xkX摪Uu.΋~$AJU5]^ּ_j26.4yeKdkh&ZEIe<)-80(WF+0Ju~fԪ^)'~X+{X˸.JT('vm[Nnߡ??) "bB4ml6cmV K8R"RKTIY! t+((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPE|Dǎ4cPeʊ v}<>x1=vݜNB?95t̏+?՟O_$²k?-XMm Gʗ"l }Xu++{+v/ύ_n v3JN-+ET(? )xƷ&%ו,Z.F˺O_J(|^~2wkh-fXNv\Gnjh((((((((((((++|\O ovۗQ%# $G,@jB.Rz$l*$ze ,p.u1x]#W B 'x}7>*x^ K>rQ hCHS؂2f|7e\]'k5~ͦq[TtTfҺWGqEW{aEPEPEPEPEPEx:wOWL660-[%n>[^*uFѝ_{;?ǹGBZ)򅔗kP+ (+ <oFW_k%Z]"W#*ap'n*p~);+dsc1te~X&ܮtU *K{)`Y- b8,8"<ӳ:M]xǾe[x2D2X kxo:W|?CϧjְїuUeʑ᪪Jn׶nKIxu%Ε}m6袊((+~^ $4mC$vL(\Gi#`C߶U| 5mII#i8^6sw7d~<'7֕kߚv>v\Y,Ou~^nkr>Ӣ+O ( ( ( ( ( ( ( ( ( ( ( (57CƺvH5:`E,SJ' +jzF.N询տƾ"֒ZjݒoDzUWu _6ME|)̱y,>c[p t0X*7W8J2b%v{/I|?Goh(R]zo='Fχ^(3B.{`$'0|'G ^vߔW/g[-*7;pA=~[UH9˲e,2i{|]E}y$}?E|S۟o|EYj *Iĉm<ۣOV As,CMŽU~g3s|a<E$vz(OD(((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEP|+xqa+vZa{+xφ75#U9y|+xq?fOZ%ß K4uHd%,Ĕ$]J kwc*+iO_hPo;/iQHBcH€1^S~W(D_KfDIOyn^Wg`TaV*ͼ~eik :qƃ ZP@C~k_|PHmBH") ge%1 YA$Sԩ~ڟ|['+0& 诌&`[ɚ#Rco~o_<jx%KK-+[4mf!O 64-:wy2]jXeij< ~_3>/{oً7KA|./k:vX{<$B#2*b*m]  ufm!о ܰx#+ V.Ap*x,P?$>[+ YXqrE$@)g(pI*>cmO iOE5^uẹ|/MˏyT#*pxEĖ =tJswyw fPN@_&>9#Nx|0popmw_M_&E\}_IkyekjTv (((((((((((((+K^m_/o*Fdzf)\G=܁SHEǭ~?ixſ~}JCDa ]j s矰:qNѽo !Rq Tk?C}6)m$:aB6+1q_9xe.u-J]xn oY$nQ|Ė ";ۍC.@nVd)qҾ{_/?>!]JZA _ XIm)bȉ#D4%P!53<&^ J JWN~Mk˻}'嘼M5j\m%w4%} ?jڎRxNB lig e~5>%/&Koc H`J 0%Y|$m/or[]oմeIfǕ^F] >kzi42=yD"՗ ]?S߉>EGyêL 3^mqxzh 9)r%ir5;_N[Xr|>QJXUQqsni좣5zv?~#|\:\l`d$oɒc7¿#K^quţ()m%|b>u|yʆ*MՊ-4|BM'mz=Oq8M*Rr4K96KG>-N1ZėvֆG֭b 6܌#>x[ ٴm/K7w1-̶]ExyT0ãvMөh? 
OZ9.e2sɸ4F2YdOmcFb7K8Ӥ8EZNi9(^J7V|GxK*U9;A;E뛶IGǿ~W_ō|BQܤFuub7hEtr]rw mZ7{?Gq<ęq(!Y Ŀo ؽ/_WOWY|Ch~7]FS1oss O;$GCVLqt#=gk&{YVңO<&"^Q\M[$[Ke{.ϊ0՟]k߈?i)u Zq FcpM{_MjiI.C.9"\(i_I9 TcM|4|2]{'5]Xom:cdAFF#_dE|4w{j?]jme wіrp xЌ+9(sÚ)%+MVOKXy5yN3$>Y6=]%_|->$N_sh^`,7]8r)Ȯ>.|5.|?sjJ`pd]hq Ϗռ;MF]g^]ijZΓBn] P6|;x Yi x^"én=au ,5+`j$W6nT9ۼlQvb3dhSRqCw>YKIӔnڿK[|U>\xRWCuj5AdR)O+pǚ~3iO \zmƧl#UDŽe& 瑩]gyzm{&}q啩цWAQU8m{W],O.(ԭ<һ42$TdwkY߶$f#Ε˥hSze޾5E]۳|߲oῇ:FO\xKִ=.m/KM*#-0me\^jmbx|1ۗU䈠{Y$o">ײ ㏄!sxcDӭ5[CUh c8Ȯ¥x#xB&u׻omܾ%İWSI{'+;+kk/(¸~!ßx˛E/?Uꢻjio~3]V|uchXiB5*M_c9RiIo[h|?Fa,.5j7K=<@79bFsc_=~⇸._MZYiok:Jӣ$kmZn`udJ&5)ToIKY&͝b|txSM/c'9nvhh՟|Ig_φzލ⸑#`ݤ0H@#qG~Z_n3~NgjDžmۨZ 9(^2IR>iV7U{/|%~Դ۴KRe##!;8~ .~ Y] ^Ob4-[k1:̞XHYU席ZxWV.HxTNɫ^[1tc1K7ʓ*rIM|$vuggį?:Ɓ ~"qȑhPs'>wi_[eTJ@;s27~c=k?ey|;i-K\KEo|4YWC_x&ſu~Ⱥ f6A%~*ROʠ{6UiA% -iI5jjҍH19eҫ6LU-tNfia7Gu뺭ķgğ,`m @G\Qo^%+?_Oi-[" rp )8ߌGִ2j:4L%fӢd`#L /ɍzoO &aطW5,חЧ~?WVv)r}VUrJƝ{ho~7SLxLbf D@ڬѰ<ᗇk? 4Ho ۤ-n75=ˆ_2@pj +QoѶ_7[?t |Cd5oC"-^[#IlF \)t] \- \4g8^⛌yvI睅R5I RۭBhÞ72 ԮQZ[k4Kc`xY`TOx[W vEi @cg#|  Vg{?>"?Դ1>a ۆreO<q'5 g˺YcaGz(X!Pt>E(|iGZhV0הpU='wZM?Zg̿!^Ŀ>3:]R@TdpNc7gpIo/~ӿcǯz>!V!sx<"Cin.Fw`vyP,[6cq272[@d0s?`OC1k,|(`iq4UPqMBwKd r1a֛]&ӝ8 Y]sowAKSxAt[n\H`F]۞7YSk&B#jus^I>u~t(d}ƾoD~LW~x_>Y?go7/h6,ci~L;P;V@'*Mp؊0ZKҏ:VVݕZkheᠰX=zt)ٹ݌wewfޛy@׼/ 7AF'5KHnuI^كp/< g P޾Wo?xV5 +PF{kVUU r+(_C_*N;ZϖWKk񂦸zB捵w"|>AG&ukp}m ŵ "e<y7ӫiO/cj˵ˆ f;UI‚O`Mzoo&qs;jaS‡8z& NOcr)JM)ꓲ~ehp]ͳ5ᤚ^^wL؃>wR`1X(66+}ٽA s[Cxbݵ->|F#*fMJIpSE|p~ ߆i,5 Y.P| ey_ڗw~ O'ue azD܊eRP@RĐp'WI/7~fru<ʭVIAȰ3Hk*$崴{E5 v7+õ)xy(Tߚ?ۇƞ?FSQ0,{՝di@#6ӻY{ڲ\5/~( }: ,^^sg3yaf0\/?o]v{?\v3hZL5^XP_H˸aNk|OWZ]{[&dڄa_2[.VY4#Ќ%SWIE'ͯ{G1Jy>"U*5%4irmrs~(4_snI/tmrQ xTɏg$Yv;[r'䟅G;}c^i)Ӡ1ROzqAhR¤TbIQi[_ ˧/mN\Rm6&j|B(҂((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEP߇_}_N1x>g{#y~z>vghA]lCm vѬQDƊ`*pREi -nIY$E 0UG-lӿgρZF6r cIW@r2#r5Qea KE 5OzӇ ig8CwQI ( ( ( ( ( ( ( ( ( ( ( ( d*4RtpUA=A@]PcYK`t`Lx|6y508޶&k{i[o0F 7?73]Wžѵ~{c ^)b`IifXåX(HmXED@GzS֜9͸njP'gf+&o*:v͚ʢ\Kʤdn#𖉦jgw]3 {k]AuQʥ$E 0U8 ԴWWVɷݻ,=*Q7τ_|im;m:te@P+E\Uے|Ɇ EG5/ N_дI4vȌb9E0~Q1\4/%m5m:r[KBwG"6G9j+5^崟uӶ5ߦ(izfãiZ6ı(V4@U@q<5^ 4s.g  cU$gt5BU)EI[뿯pt)8m6QEdjQEs'Wkj^0nh-bTc|m` 3fϴɎjMuV[wvU="er핞i-WVөIaȇ!ÿ~GS4M*d-ͥ́u@T+h&8:pIv~S Fٴ; 񟏼#;/z|,|@|W Yi}.4 a09jgxTMm?aa.2kѤM&>,xžt<3?uUmأ2;|X+A觜W[x.> ;VZp*ZG#0Rr4@Nž?sO M%-yn.J].ŊɡVjVkkg]蒣G"GYXdz;NcKx^I_/P  'F_iZe+aooV܂P[qNzySOe Y^v>r i{g;G3KbB\V/Ծ #L>Yt!"7LXs|WW "kT<4[ennmܩaaaçi[ E(tTET@1\g>4j^/𮏬ơV :.RGq]U?iNMKm?T٫Ҵ+BtK;}>vCmk k言*>t_K]gk'#tze!}$`*kk.Q*'so;\-lx%SYD21WEE)b*Rn[뿯pc)(ޝ{Q}bZ^oy_\Z-շ9̮#>ߋUYk,͵w1nlXn88ȭ)*SR~n6W24^`|5Zm&vs1c$MT%jH?wVb?d{]!:swQEsEP\٦-OmmFM>*U9)m5hȩJ"Q&G6nielF ܴ|[߁䶟5 $/&𭾽inC.wa=Bb0q_-x7Mő>eSQ+ Ci~UlŽ!Rr?lgM foeחsmgC$3mU' =5 >7/ߧj ]2d6\v 6W#޿>oC71Yh>mˉ3jz]4qpJY"Tpb|-gZ>=3[>$w/N"Qm"e[oPޢ-?8~Wx:G延ծϒvG#Y?w p;3woOV6E-ZǓ5yvʉ#cnB7)?xØ|8~·b k3$Xi+Y4ذtY ;l&Eznjk__|QxY4{)E-q\4U+{'~_xgZH{m62`R v?7~|d?hּfia6W @~ͻC'ҫ>\_~h_ 41_nqjc/.5 [~ΑZ[^|}#LDÒsFŸ >Zjua'k{ w8trtoH5osxs j?㖿I#r]. Q-e :aJT쏭 7rDe.!0+NHdHz ј'mIaH䍃# R8 A_"aښI}'RUT. ?l0cPČW /'5h_Ow*܈a{asR%f$sQ_:m1-i i6{D^-o1CGo@ h/e_1m:IaX!KXUUwĥ˦@aJT5#:+<3mo,żVq*ąٷph ߢ>$^>#s#(Ԛ+ƶY|P$ȡ}J+Fy~-=;XxKKH'+iϗR/݀cJҿ&cwc𾏡yC|l$yqp#`) DMNXcf%q/ڧPR;a.Ks>~pmǚqמ> ? 
~;Ao㽥9 /[¢K$G 6I$*X^__o~xV@4߶=1J9 G .=kἮ7%mSe9[{E^3:,,^'Rr#}ߒ՛_ߒ_ .#%V+<- 3x\6]Uʹs$DGPӢ"˸r_b*NԍXm)+U̓VZ»S)RI͵'hޛ>֯+ <oFW_k%Z]"W#*ap'o|m/nI'vcE2`$ *ÔX.YNEu__S|)sᮡssWS$xg>`Cgh& 9(E]'hU+S%dy%?D<} |2+O ('_n|%^*Fu x-xhҕZ K3V4ʤI>pex/,ѵx;M3s# Y_T ~~ڞWa'ʭz& $sxxRxݒl>]EƻMFg[۾HF,Hv} _ǽO|PWeǟmb#H^ #-a{Wip<)Z>C| 0phUi*N+.kk}ǺQ__|g_~x~MpVHY|,OWbGJi_gۡY|mY\[oJ+#LS|b7R#uyxNrJje{JEHFI: i-_*v_qoE~w~՟|Ig_φzލ⸑#`ݤ0H@#qG~Z_n3~NgjDžmۨZ 9(^2IR7 N")QJW4]>vDbx+T:-dWӦME~j|G+쥢pqk:-_Èn$`)G`IO|]W??o*4҄oPo̍ߘZ|' 9QkfɧtCxqf J5U?OUf>?qƃ߇5:֫Wݟ"]2d%rG;߁pt)wR,2s 5VpJ|)iCѩQib9T{뭭n>_r^O&>B ˺(~ʂwHc>σ~ԭG|5v}g7HnF6;([!lKjTN*mYwu$-ՑGiӥR6(EKI7eg5[OT}E~e?/ovOm^iQ%K \w~5'~ň+~/}ּK!WSR72[L>EAw+`RqZ>9QxTFFN4v2\]N:pu9eyEnMm5t}E~WxV_@ig~ +"UGY&Ҭq z-m=WTtoAKX缱 +eG$NQz` WGh*n_iҎ*Voݝ{~_ho/ا[%No=*4BYay/>i7HZ 2bo;Pkwd{ d@9l#a 1uN4:mg%},iZˋNt(NSU\nݤWÿ_xsBxᷜҪ+KmowxC,2 ?kj=w{xnHB!CldobPUCR4JכOX6E\]FRunJKIs]WӚ+ٗ?+xgKx`($׭7HpGsW"K7Z.B7m ,3;de+{YgO0% 5ϘQrѾYB.;(RDq3:T7S^^Y|-Ys6މZʏ内XS)8*zssFݨw_G_V+xzo- ƍD#*ʲl.]m`I&ߴ'j?|!}( YJI-# +@䂧=J^}-m2: Ssu=&vn٭v;957CƺvH5:`E,SJ' +_;@G/M=nq";wnxeM 0#c˿h᪼a Iy$%СC3]Q1QW^TR2՚W8´pn/i(˖mwMrڻx_ZjDگ$WbL˳;Jh!>($&iΩ4~Kܳ;0bnǁ_ @|2k-x:6[5}fX&Q6H2oԴ t8ee`NC+F8?b;?߄L:fۊn&ft[_EC. J\Ž$g;1CV_~ݷ3W >1o,?lG&Mo(o?NeۖQǗݚnw9Ox žHOSolTahW,{7E"O_ 45~ O`Z|A3<3))m7 [C?<+wK Zԭ"1~bBGR4|8} 'fe6b %Rv,I])?VuW &޾mO'ݻk:Kw^_+Su߈-?S[Nk{_:} eqya`dK9G-?9d}nFkHԠFǘ #f( ,E_'߇5k?>2ZwZ%(+߿D!gt8?fLg xςtkFRskgas{4˲T3 ތf"Ҽ7 Ez&e1Fa8mR6(!Ec%t'L_h?*P焾x+tΏ|c7CINbݳ7wg:_ ~x{kxWD Jm&[޼*@H){U>],~}^9o=|]=MxLB!I e0`, c 'S={_o9m涐?\n."dMЄU'$tӣxF˩Zw Ue88Ȯs|={xGDndRqAo;#rUDW؜STd<gOڟJL'ƚd%ʹnxYQew/5mCe?Q՜B-& $"h_^ ߇YML4%YN;q\w> $| Ced7V:|\ln%T\[Du; VOh T o3>:|KeËݍ uye,+4~dgPJmO]s^*g֫+ko njfqp@3ai`<wǪj?a?4=w^GL\^I5Y\ UBW*QrD?V-\k֟<1 tP(;w SԴ7YJ C1ʇ8*}reH>]>Z;,HzlH۷7h;bdRQacaӬa/কjF 4A 5<"!X"U+xKB>Y<юm$d-$6[;sd.S$cK?瞩;MOkK}#S]Bn./FZHc ,kgA]&@g閁DȢUEX OSX>2k$ZWRܓԬ12+}T*1é|moO~,n,SCoE䙢 V5i1A,s|R׿ };<^|,56Ԝy]ҍe9"6i]8$-4̬ܮlwG|:77{|A}+LWRhd{qsl!vEb #/9#"]??I|13 D"Uu&cEl f'R3wFkS_[KmfdeQ.%eR27pÑ+ ?~ :g4k ћ{Cao3yjɯos bkaV99m-{__#1j84c>~^Vx͵n[{^yߵ4Mq)ڲz#BZޡ.<${xْ9Q ??4 Xkl"pR }Ѭ/ˇfTM/@4 Zw46:]6vяEpg9>n.R~-)Y+6YWQtVkbfH k7'ÿ~ԶSi>#K^quţ()m%|b>u|~⿇vп9on6\<`Be^7BnJ-%4{44g8qfS;N_ d&⋫C q̓("Hԙ^Hk q4?h.o`mG|vKC#xͬZ2 n\_Z'|;-LM>fDww3޻ -IJIX`pA"ap>Ž]I(JRM<]$'f5Wc&T%Νmڿ-oռ;MF]g^]ijZΓBn] P6|;x Yi x^"én=au ,5+`kF9út-?QumA uu^#a`_t{ n%k+*Jb@'֫ 8 RҗOi.fII$߫'|+U?gTRnm/DC?Ckn/6]3K_~tjO?3tDc/)A恡xLE.iiӕ2_@6`9 28 yԸ%nNOʓ>*k*+5|CMlV4<;r꼑k$DR_;◁qþޤ.uo huhb7c򺺪-`agil:6io i K‹cHT P0s_<˻sJ./2pڼ5RFyLYs>#/:i{MI[sL6GU5NIv8zlv4QE|!X"- Z7Ï>YMz=֦?[*˲< **-'Wkj^0nh-bTc|m` 3fϴɎjM}n#ky|+!^qok C3j[C[>WTo^hk+x#24qyLN3_P|$Yβ ]>iGI.8`szsyne9՝$rNM=%zhyT.[5rRQjiz?5߲/ǿlzOWgIZM9ZrNܑƑ"DUT`8W?G+_9jק:_£vg1^#x~±cG4D9:8* 4]MYl+cyNGĞ%> ~8 f*&hG g΀*pQ{@te敦Y)¶jI!U`y o {KYz2I\ʗӿcQoѶ_7[?t |Cd5oC"-^[#IlF \)t_Ktwi|4ZfL#dXUR.IN+o j_||Sny:wKso&E,9Ӿ+19YB\1IFIM{k|&xps5<:rpvM> ϖfOMi(/|nF<԰f3գ*~>1G[<)xgP>]%V ?ήN;ԩB~XXXivpe6v#b4@U W⏅ 9)*ͥL ̨&>L&G,y|PZJ4 OI,e K 1>O‚x[EӴq}/uk/?[/EVU^࿁:xťv 0=Lfn|dԐk/xNtM:=zx2i0B5_5jl`Uox^9zEu X|<bRGqqiIA)F)r8m(ËZ.mSUgQ)G Ox~MlsO/~= 撍|![:#n.ȬP6>-)e}x?aO-`GGlÍ#U}MKe ߊ4Nuwiw ̪$q^|UN%:~9ϚWRokeՄZAUB\l}ާŸGOCOo? 
~d 0XA8iՐ6,Fp @OW,EhuǕŬR[s^>N~R9|C NmZUb_s6ᓃQS Mӧ*rͩs]E.jJDR*F8rY=wȼ+~*jV("8H9W_P?_ ?'|?x[IA֟kiK8R#lhŎ$y5SEVk"O:j]y|^VIP˳Hc+-/fW}u=l+eINJ7zh~z~:|T+:v=̻Y`yjcT($תxh>6kH,-Q<(y.c*:kx 5kwtysxac#ݴgz ~|1xwrre[[ȧ4qVwT3-$y9k}m"IqЌk8b%=_[+~ |a+![{yP"AIL,O!Bh{9*f8*-J/D>&8`6znV ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( (?ࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPࢊ+((((((((((((((((((((((()ҝMnZOXם[2}Ƽj= ?Vcx{A6h(EPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPEPpython-elasticsearch-9.1.1/docs/reference/images/otel-waterfall-retry.png000066400000000000000000001430231506101734100266020ustar00rootroot00000000000000PNG  IHDR] VsBIT|dtEXtSoftwaregnome-screenshot>-tEXtCreation TimeTue 26 Mar 2024 02:24:39 PM +04 IDATxux\e1qNn.ﯰإ na;f 22Z3H XeKI.gs9̌s>ckJa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvUrqL-.W|Θu9QSz\Nl&3#ƌd,Fӵ%yihh!<,Zwc4vgb^_x06k7?T7ް/hx(bܲb)sfMw{>ZIRjX#BM!|d,--7?`ْyܰxeȅ)J@wT0~Ml9s2e1iјpv{O~N'Lk袱7e+j5vrۋץ%p:{ihh㥯7[U?9WM34/,$ꖹ`\742--m/S&؃w {;m*;DT@ ?d*qs{~c0stt$ё׺~>^gE>V#;L7%22bА+х?:ֺ1-txy DكFZ~?sIDx3MbтDG}oλnq`p;neCַ3&4$'t\"0߼HGg//iڗ&4$Kiy ~?&sfg`XAC?PDSsvryb(*>~2?`ܙ<=cޖC98c>='͠SWȎ9v]NBCM5dnSlQ@Z>/XybLtv_`1>Vof|vΛ~gӶ|Nv9c Az7y[>'K+pΒsX`!rS|$vHvf˗'+3m sOgW7 Q[ZZ,$l|&dn+}fz~ @_0خmr8gQ̞9% 2J oAuu-&yYyqїsuӲ~拓sviz{'w-S9ϙzmƩ*|^9rRR0?Mͭ9@|ǿW WW!>.o`6_zSl۹N#Ù7w $44xLJDDDDDDD/ӏ~]&%% d]FϾ~<<#ٻ>ȡToed7#DFEPRr?XGaAIIC(v.`졼NDDgjGtyvW)Q.$% 08Tt'IL#2" hlnd2`Lr3J'22|ͭ >Jgg7 $SHd"<,ںF&e3sd2Hlin^{>$&®x^2Ғ}Kk;Ͽ}HJcbnaa!8Y 8(ߏfżә<)4cptt1-Lt*c=.[SY9Z³/řzRHfR|%N ,l` a^[ĤlhimgIMI`0x (-MfF*y9NURYu)r  xᕷik ;+ t;;DsKm~/Ak[ٙd[x?.W?~w?܌j%7;(Գ`1v)I_~ ''+H6p`11ёC* uBS&`0(-⹗R^YMZj2z},4iɃ:Uk(9YN@Djj 8Y 2S/):pƼY9/njn`16Y$hrJ*L# ':*fbY8o&Ydfb1=]LnH%#mI[{1)/ܬt&#{ 8]]KvV}vM  *+g~hd,a)mm?͒%7f|AYY;vАV Áj_ۯ6w6w]ܘsvΞ^6lE}C?t7S&`Zp=.:[ͷG_} 3HKIIn7+o\DtTԐɛkHM]'S&ecZ=?YKm ᖛRYuELs0 ,e[>q1<=Dh/Ů&3mNRVQⅳlx^ ;ٻ0w}a%ܱ&p3kXp6cwylPԍ*?qcݸ\.{5Ӧb1[p{<96PT|xF#M-=HxX_~"-m/S3O&,4[W.؉RN*'11;Why[1 <轤Nxo/o]ϑ'L sGΗId26-7H*gLǽwJ@KodbN&11JGPZ^-7-e6oϽκۙ5cʐwvv#IPP h4ͷɚ{V3ur.f~W?w ({oҲ*V&Ol67?G7V羒1?K_U=Q/SyIYX,fvٽ ,37-_DsKE'HȝW쯾u9&ZOȢ<e|'d2r3?ʻlG*"""""""rm]|{0|>6m2[pFXs֭S@Gg'k׾ڵ?6nڌ x5PSS(.>zŷǁ;GծS,?sgb!(0E fICS3J+h\[ȑ%̝5Y3& /00i9m}>~nx>'0vs)\<]$bYZ-F}_'xnBPP f 93ۿ{ ơ`,^ZP"SZ -?v9K`?S|'?w:X,f,Y4Oskn7=="Éb`I叿ݧ ;;S{jX^(+)n vVPܳ |>?^NjKhHɉ lVfaw*l0lږODDwq3جVl6+Iܰx.~Yxos7bjCzj2u\C0ܹz%aX຤O3 j[3kd̦+ΕĜYS cX n܂}ɧVҹ|.7-cZfbN&S&Yӈj`$7 }~bw`|>VݼXfTY% d9aӦ-F 6%QΙ=d?fa&?;vq睷S_yG?v;g7MQ?+}.w;ݣ^Z>PwÞ3 ,7GNP[ JZ,LH 1LUR \g7\rR+-vQS"#>.`[q\́FcYٲlm$"#0M_>? 
G2RߗL&R룳n S]SƭE|l` Y\5`6;{B'>n 0]fLHRb;Fr8x8mmÞUxq8zp8Oy6Ng=xaI9,;@DX(ӧMb70ZL.1ZL\d<f=`oa x< TT(5sϝ oGؾs/AAdet\Ƽs{<8=`{bt<+ٱk/ ςy3 $:|;Ieՙa3=^/݆jm`WA!ev{p-GԨ3xq8~ZaFL&>?Lv1/`01چB&Ӱm40Lc Ab>&"""""""?E=wlcÆM̜1l6!((hlFLPP߃KznL]}̐$22pd3TdŌҳO2g:w8MPP AA?5{\Q;P_c4("0-bZy,;}EU>]{&)13\VP;KZ>:Ԏu/qqyw8XtbcHN'>6T1``r^6?ﰷOEsoX0氣$&4H 9U^E̝Κ{bwvF}D`ԡ#yᕷD6hb)3a dl]rؽ()sT"u &:r  FJFGEbZ[=sE_)f@b=9eS&$2!Kcs+fۮvcXF z)lfcC.9N !v8.?YFzybgdbBV,[HgW7eUF6mMF߸LpP UkVd$&y1o4zz8SS-{TΛIdDFY3&sݫr$o ܻ9YL n}i`^fj wa)8{Zep:#<7b2 5A% hfQZZƻo%//axhnnwޥid9YAb@@c2G<ėKX,9璖fZtҨda4w~٬$&~R{p?51rl\~N^ή1lES%"v⨨>oiOYiOo446'd2ˢ  ޾!{ ĬQ֟d33+3(fv_񐛝NB|@u:=<|tf5 6=s )zNzf%)!sngg7ټ-ǃT9g> iT $8DNW^£ᠶlim')1̴OK74HbB6_c~kFz{/hOJ' Үf_4kԕ/N}~7E'=Ayvp[Em0;w68N*+Xs}睅l2(>zKW0~(nfHLL`͚lW-}?[VцM444zmW<6`LJ8 ظ3sQQ$/'{~^ 2bN\l2apkhR:5%w#GKrq߽~vDUu-ߏjaLfϽ\~nN>h+?jNYa;Ͼ&Oۇ㡷*z{{ &00(qa0 TT\6LbAnEoy.8fo^p㸄c1)-y.>_zHWl~bI ͆hi xPk041P6l \tvuֻqqIGg-^xm:wvE3x׽bBYq;]wź mW#oh Ov{*Ό},/'H^n7gmcx,+%2c C&޽ƕa4m5堯5$Ԋ5Fpd9}f˶|J#^a뢌 @Nv6444M+n:)ɘLF[q:LHJf)<,Wx^ㄆNye5,?kTU.Ĵw<ad$xR o_"1!V$$djbn&=@]}q1wm5CC1u"Ͼ)JUu ٙܵzUIY}5 &&m, +n\;c5#j60}Tct-mLttEFF_a͚p8x=^f3aa_ p5F>,fm/ +Sy2$3=v㔗&<v~{{yOc.9ypI GkL挲|ɹ>q n'%9}(p>zzkF-Ojj좹pVݼEgrŷADDDDDDDHUDKk+ߧAy핗1c`ǿOg~Nxx8?qɎRrb34f&c1-S_Mnsft\XS"ݪ,׆h%݇Kmrc!07D^P]]ǟ|KDE^L\eL~ÿcMnf3wyYY׺[WL={fpO֌kV&'lrµЙل̾]ly:}HrR{7O} ϩ--""""""""""r!ER95-""r)fϘBVz*khf{kJa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2)qOa{ EDDDDDDDDDdS-""""""""""nvȸ[DDDDDDDDDD=""""""""""2uDDDDDDDDDDz\=z{nfR]Ha|.}Ʀ&Vݶ__tOYy%6hjn Dzp\y~|ev噟/MMcZ:ŒSYn|>;wpx'X,+sT EDDDDDDDDssW{ ;+Q/[ 摇~gxu[;Xu%.uv4Y-s{y` yW2Z>5TUv{`lq4y^N,x3S/8v9|M͟G9Ydg]RS[ϱ$2mJ7q>BC$"<*T.GiiʫL%7;cĶJ+)-xhh0KͽQU]ZnQL$(0~[8YZChHJ=KzLM=h&fbZ/2* {<""Eg]ex9YZA}c>r2\㗳*[1qg?斶aLHd+[jj)*.!;3 sqD)-X,fRHKMh4^PT|F3HOîB^m6+fL!2bffcYFSX6!ؙ7w6m񒋆Ξ>h3v7-ÔWb0f&k8xBC7o6rq`UUSHOg:ݘ}6U_^Dդ:)w?چh$(0Ry ]Kv{x(:ZBhH~?tw;Yd\|>W?; 44dsёy5z/c~6nEgW7 ͼ`}7֮EDD-mGCww{.mپ{?]]v2r1 m_x#GKrҫi$^ذyVx{ q&~8=DEɦmyK;,($$(DGg'i|5.i_a2!|$&s==}ƻ*$44Lsks2yh#l|~r8{x8YZALTήnr3y;/xM!22|f*'^//gX} ]y-UյDE,7 ~r}ihlf{, 희۸8,O~ڙ{]/<ğ~ۤ$iА ^/^wsn7,8go}0jky8γ׾ErWbͿGdg 8Rt gO^l7ʬǴWu <(sw+ []Sqoe$ͭlܲiSx N*7L}O~?Бܱj7,``νVr3<ɽwݦq;lP1fMDM]ww;h"Sbhni_=Ǧy۱.:uܹzkp-m̜> ^r}jmmg|/[c0k_>Bn_⼳E<//A|OLCc3yU{y-~ y]:YZƭsJ-hX Ͽ6[]$'o}*Z.gC/)bI J+NϾJQqɈu>?~/L-ʣdٻ0EX8+%Wr9|>mNKK;a}>N/>Hnv~W[vFlLU\*BlV˨[ZۉO=g~O˖;oXW^[y'$44ٰqf;pWtVYy%--m/O?~Abb<+W,GϽoÿŌWbܰt>ؘh 7{[?G@@Xwu+%>~v1a=Sb~`sdZ2%rm).jrMKgdↅ9V 6h#1DFSZV&9)Y4QRZAoO/q1LzΥx'hkfLJJg)\;~̴`i?Yƶ]{F ==c.cxٴm7y98NjZL&223>Sh$3= CBvN 004cF*>ckH߼`3 <6tDGrI|ly:]CcS )e"6%^N,0L!(]~JNԂh$)!4(:*շ.gBR¨TL;ǮJiY>ӒiljTi%.3\zppf3HKl}::fɂ9ߍɉ̝=F\.s\rxٝq1~ ^|J˫F,8MuM򲩨AtTS'8zm3)/s~~ʫ8SS%2"L|>SKVz*sfNNKLtǎ1v\1aLx)>Vf#"V߲Y3&>GFJ˫F u>?~عsJRS.7 ,7oϐكwþäv:]l"7;ĄX** uB*ۉf#3=x}/AEv`ŔܹN'ǎ n]j`zRsgMZYLMxX(U5`6N,GuMUgpDD2)7[~)**PrYͯ}[[ 2spx 9NM5^|5A^n~SʰZ-_~wL;::ӧLֽa"N,wp{ASIM2%gB\! nV}vߘE^B(N~Nlh^Ҝkz˵wd^9}j[7q [$mΪ-; X`w ߼$!>^6m5w0uRϻ^u Mܱ!'v;lږ?s˅`pazzHOa zoΉ2f3aa!WfǮ,]<MuM=[w_| 5߽h$>6NǗX3dK.fp,f==?Q8UVAB\YZ-7vWא[w!6& gO/S|Ϗh4}>㏾00 WRYu8|>/r"nZ?@w^:;=N'46Ǭ/8}^0DFRSĶ{yv'%9N6mŪl|F#Gzj2 ͼhGK[;S'-OHZJJF =lںںFZڈ T96&":]æaBr0P[Jhh0UP7 #['7+KFP`'K+kj0kb/<<Ϯ{1c*nX|If&DWw7mTV .>-11i>l6LaX4)w9xXh(aa8jpt<1X\7>.f%<a mj|}l:1p|rx yj+'_46:nnM|/)Ig=\ <̞1˯Gy޼ HރTUגDɩ˳gϜc,fْ{ Fttwb4ٱk6#lږW \OaRSp8{߿ƛ Aꋯ}Qogڮz{66B! !4H#!!! 
5l0W npW[y![ $wZuq%=|n!3B6l֎{p.b} Ս48&Zf.hBSv#mM>mVk8nǣN3r$i܅| G7\ Y,mݾSU)""\'>Uẗ́JLUTV(<4fFWT Gy5uZ-Zɪբ:-^\|2RNn׏Reպ[)'K˯U׈}jm[&4ޜڶ}*9aȎ/ <~\ /א4efO*0ۯ)IK8߸UoNEKq'ժN %+$(%)AG R-LTyy\k/y=^9MNaf{~rR:` rzgJo}9%)A!!:rDٙi m'k IDATx+xp84x@ ԗρprF(=-E!v|GߚISf;nh}UjJbEА%''!薤Eyڱs|>cJJ-î(,}^tfoǝ)xeۚ}p4㾎3"\EޣI r׻{öE*+Pf "z_##]JKIs~ݯ9,V^]!ClL&\6N]Yi ў=494e~;Ox-*褴 +a ?W~@ =a0?W6o?:34ZCv TVUo#r{<=bqϟ)s/: /qRS!߰YSҴwM]q±UE BІRVE9w>bё$j6SwToϜAz)!.VnxU>c^I1jĠ1zfz"ÔrpqisVnUVU+6&Ji)wnSu}NǶպQS4j@ K驊r\s>H3fSRb 4myi굺ox^_+.>V7_?'=܋ֵNR%X,]|8YV_,X7zGq1ѣpAmj1/;Vcۺ)8MvM>kMlX9D{jk3L- C _gȠZq^4]1QR>=?`!0o_Pq~Q\l UtמuN6PX,>Ǯ~uc_7w酣Iz?*!.Vߧ;2./$2MS_ZO$hTj]KbjwLU3f}K+';C}zv=@ -'<f?_^OLh~-/)1^?v|~mڲ]@4~OUB|}zuwkƭ*-+o2F0fH)ŢiMzWN׷R@~,'ͪSȺp5q vIM.YF&|Kj[3?_PJru)P>O`4=Z&;]C=v.k8]}ŅZnnߩNvhÇgediݺϾ||>Uhݺf>ܳ ޽G_v23i P~Çhq;eZ3"BW]yƎ*ٻO,ҬYh5z)?9iK{ƷĄxUTT5:IyT(:e+Δ3"\7e۰Y[~e+h衺pfr↙YR*lF_n PFZ6n.|mج ju蓕KիG=KL/^o}mЄV`tuWhؠZ~mߩ~݊tuWm8s~@Ih"߯S~"|So۱[+W}uENZַ۪奤#RW~_%eق31뿄[-3L{v5i=s>d9E;GJ.w҅|~ ۆa(6:ZvMe' O%<.F%ezuɅތױ>YT?F+)-ךiP^͖<|~-R麍=a}&2R϶h-Z~}BW\2NѺ qv;\GQUU:(B6 *ҥ:<'Ɵ|~&נz袬4YQ,Ҝ?wf(wd[Ԧ-uW@@}x<^YmFBCCZto>ۤr'?M|2}2=BO]w4s_w$,LT4MjD=ެj'a׵;+msQV^F խݰIq k6٪Q|u)ʗь>9 5j& pzBC֪w. Vm[;wUa^q]IGK4b؀F{匐닝REtW>UUUMޟO݊ r|ZU4xܭ2KvRYΐwj~aoڹ{oCݤ«Kֹ=^tRQA'|>-h>F _wzڷn~N~G]NZrvޫݿL]y:g󩸸TI7fƍ֋Nզ4dl+4e{ qh(%r"CʈIRIi^[N̺:nuꔥly^[Ts-0?_|v{tr"+3͸5 Cnٰ|p \saFs3B^׭Z^_;qc56MS{fksijQ]Իgֹ5^*MEe"]N}t>^JԁGk>]u&w=)..Z6lRZ!sUuvޫ~[>S *+PxXF jzW5Ek[5Gi嫵v&(VYYwfRjJFܜ,9vm۶Ceeׯ,22ҔO׬]UT"ijڷz>e?URr>w]M_ߪ'n[wG /bѦ-%1WΆizs,))]]H,Kmo.ExF ah5ھc} 0 /eZp5{"E4Ű6j oEnꛊt'ajx޻OWaFͷgۯWxX.Էg+?/G1-?r+lrÕHOa2e*`Ny4imٺC_8J]Q.-xbB޻_s}xexѺlظE}BeKgh֜ڴue}}ݞ/kO5vDsac5Ree+:ҥ띙(!!NPfb]ztcԫGmv0O׫VMmn^سw֬ߨ#+"7_SaW]]]}Ӣv߷u)lVo9Si)`P9#ꔓ.Eyr:#Ե(_s/Qvf ;h܅4Rkg^Ҩ4|HL\~}{6Gaiؐ QIi/X{tmTV^WRRU[009Sbvo>prsaOO~ ƎRXX}mcOg2uY՟ьwg\{OarHK0 CF\xl6._Sު_asҖ|hBՃt\B|ƍ,к  eei->X5 CÇ }C?JHSEy++ǘ  Ѐ~==zo<-\ᖚ('Y\R>Q `K<&'*S數gi܅X,VA^F 'h`>s}9FGБnaBVGႏz%jt9u:}RC[eFV>=|Z=7u%%&NGK5lp?ly<^-_FWS^lu휯]zgZY}RZVHn_lj GlO$J3r"[ er9#r&rjͺZh""u񸑲l+ScO+6:J ѷnYg++3])ɉz};pDzڬbfes!n)ӆ[ /OiXtK7_7AQQ.-Z\,Ty9*QFZ.?R3ߟ?)EEtDq WL0I9_KWcO>DUTVR}{wowƌ^U_b\1ez}5wJJʔn )0t7.˯O=oF𫢢JII `08%+k߷"]NYÇלyKd*vU(+3]WOPBB**մsGT||.V `4Kn),4Ό;vi:|rs2jמ}KٰG6"]NY?_+VnCKƏl;_)<a(,,4-۵:|Dy$+.W豿?UWW#mm7ϧrUWרa%SYiޚ>Cyey<1;'|O䜵3&&Zw}V=4}LM q$Yz衇ںO_QEUԲ@l>!fvcs3۳~S/CP~ÜF35.:߆0eei쨡֥h%-ժ] (ejзgCC.g󲕗utrq)섳 Y-:e5kZh6aJLWzZ}=f)2%3B9?v2Zjuaj(-5\E\lcսK.JlF+>>VztхPWJ\Ott2SgrFSNfÀΐP:eg(ThCݻ*.6FaJMIҰ!5tP_fCr3+"3B)IIKMR~nbb Gx&!Y CٙJINhh_0h|k`DDd WfzF >&w[1QHSrb|R&.gDbշwq\tR\lbbԣk.hNؐF]uT/2L!ͦ󔒜(gDbԿO1脋i:$)3=>L^>KIJPQA:Yi*Q\lbcԻG3\' D)ɉJؘhe%76Twu, Ua~bQ^1ip"Գ{RR] oXj(&:R)R'x>\Nu0fOOKր=u#W_û~ :eSNf /[YiTrrc5ٹVE)IMQZZlV㕗ek!**̣φ!Y-V&+h)gDuR8uZxDG*SF݊N8!ɐەoÐr3HOQѲr,gD"ճGg]zhEQ^nbኊt>z#""L ;)1!^1Q*cIf͐ݻ*'0?44Tiѽ.x\XfGп ԭ[ G.nA?<t.*T碂&ǩwJMm:9 O=zt3\C*?/G կ_o;JzzAqa6nĝn[݄ݭoއ&tk&X `5@keLN?`jR-|H40Q3cdeUf&잷頾=qHңtA6nu^o:kH~SY IZ4UP[ jeTm??"@giO]ytoJ  =7Hۦ: h#;iTY?oۆj$Or껯oSݢ{Gqm]n4ݒ 1z~ta wH9l]ia NOݪ|?]1Tva7 va7 va7 va7 vnka֍lPAZeg_= ԯgAUE("G$I6+7#t>@!auSp~Y,SVExp9w@@#2 b; /"yG`SV!1{p9\ۣ wkaBCCO&59Վ*awfzcJsչ}АVE;cVCvs4ZWv> |<^B6 w>@^_}cp{|2M)a]~3Mbr9w5u:aZ?Wh㷓)- Mږt=n@#=n@#=n@#=n@#=n@гul(ާ O9>@dr},sbD `*0eZd±>Y-}!b֑d痕[a~SV!\p\]G6w[7"FP_olYZDM(c8va7 va7 va7 va7 va7 va7 va7 va7 awEE_oi3ۺ9Vbk׫<]Reem,@+9'3Ke˶:Ɩ-T\\rJ.]RwJMKяu_쮮3MGK[n _}x}T$ 2P??555?_SIɩuNʘ|>g_PHh.dvIx5Yzى;?nL/))=vC7#Zl}n:;n鄁_ՔUSS꛷xYhƥH}O9>@dk+|,!hX,y LYtypq IDATmVDw~@u$~@!Y,yGeea)ߔj`!so'WWѾ ں9ٝ=ǴhzTV^fo׫'M)׫F}W1碹he]R5.S箟r^1MSn_V!9`R+Bw>@__;? 
1=>qQ0Mbr9w5u:aZ?Wh㷓)-5m -P)I1ÿԕ.h^V]]]ig tK 9}."y^>i&^cK^듦.w%:5!b߫M^~uv|%zuuW{$T@׽)04m -\-_:ftNYw,듦\QQQkt (<<2mvKRxxnz3BRRbRd۲Y Ӧa$vdg*';UKʱCM}m[2 sСz4h`1 `i_pOi׮=m ''JJJۺ9hw~)6.V?zAܹ[iup?/IR]۶A1A|;zw|H?ُTXX%M^~u235t@lM6oҥ+4bdgizY'=n>=շO/}m߱jؑJMI֪kb BCC޽{(!!~Ҳr-_RP~~'٭].[ZZ6H rZػӧk>SJryڸy.VPll:vRdd˵ -RA~'я/<Zgc^~e 7~ה,ݙs\.S ].UjGKLSTUuRST^=腉(>>VVU))S~˟($ ޳W} WmmV.?Vm޲U\-,~ڛڵkzrҰNO?vBVEVUunbcuϝ763hSS|[)y<?=9jթֵZÿ.Z~g}tW7'|O?y{$ϧ95Y,WJJJ3?|.]B~o#٧jm555CBa[n[/L|U +/=_A۴i*++9g?<YF!4ow5W^aCbX-z鵩z)zw+$qm@!FܹPߺ&y^M k.P~em o:x&\q# M7]׬sfQMM.F3ߗEi9k"].]8~L>UU՚Dk窫skӦx*)-;>OwR\l'ы ݺsQ6nڦ#%J;Ys(;3].m޲]unԣ[g~mٺCb(7'Syٍ_S[>ׁdJVS :*n. եsa^WO)y^x5k۠ujUzzwܰ OO?bcbmqbCxX듊$Ilc]}2MSwՌwg O9GK|d}~/窬Ԧ͛%I.}][[ ?QdK^4ZM~lVE*-+WMMK|~WnyyTEE*-+WYyz"3BmڦH;(nJխ͓5rf\9Yo("F5ެq[0TWW'듥߉Ok/7[f ԑ#%JH;~aaah4M-_Jeep% Ç<~}zfO>D]s Y^seߟ_yHu:_k옑իY\eeeڲe$w-RM[qV}[rJ1 CWMHyzu4:PCKWցucyڵWw&/OK>Y;ܥp8xXO>5Q3gSneYcnXVt%IV6y>۸U9 ;3Ӕok]ߺUv]uUz5ov ڋ0&Y,M+)n߰Q|l={_~]yj—#"kԊU'*//W۶ЯRSSԝwޢ{3wo(ݪkг>]FcFձ$Hqޜ>[}{PVVr'-%I=i+eY,֩kuJҹ@ٙ$î{igսk"g''%(33Mv}*DZvMI_ѵKw튉:@B؍vij1dzϧܹtϷo]j=dy- q4z7Zsa}?n+*+r\/X^Q><^~t:=ޥKnj=e^aCJ\z?~C%՗i9bYk[3z5j 8՞sokMx(UEw֛gio;oks:e}xhHCVUg4M֭Pxx{5uV] UX䤄-t,hw^z%uuuk5k.]ueĖr:Ц׮^xl6RR*!9Nj1?~ ;t~?D|jbe^g{Quu(Y,fdե^(*ʥ1G4;tx^-Z$_>s34Mt6nޮ.s…$mN]Iԥn؞/բ^5k7jؐ'_0:*nk_|MO*îpMkMY<:7e/RRN/R=ua'?Ѐ|v=zZ5zC?_ֽ)?NV\cǎ9HRMm>YJGUzݹ$ϧȐsU‚NM_ZV&׫]޵HW%e2MCH]_ʪ*?pX[~sd`E؍vVz5PTTyv-ϧ? mݶ]OջWK>Y,_L=ؓ4-vkvة_Glf3cN(hTZZ!ON;@GB؍v-;+C_CI*(kf+Vkɒer=zW46z<11A3Νh7i~(05y[r:ojj?x5wÒ"\OCiY%Iaak4蓕z2%FVk}T^Q 5dPf@2L4OYucJ;Oij˶ӟZi)~ؘ](<־dJJNLSfSrrR>>OaWRӦӏ$In_V!{ -HTV^!In^SSD󩲲JWiaPH㤯ua*2@ۭrEGE*,P+v{P񪲪Z^WR}HWYEoLSu{eYZѾy~:lXc#:'ӔB6?Ӕ^,!syGQS#9VY!y<~0~;ҲJM~g|bCa23?^}_'++ㄏ\N\>Sz-0錐qJ|%ԯ-dͦOצM[T]]H0 58Y,F!1a kn5HNJh-$$\q8슋=423-[uSBif)˥B!F0 CݻuQn]ں)h!NSz?nWZ33SA XVedu3p`E@#=n@#=n@#=[k֣Cŕuxt0>@dr}#0%CVhZ Y!`L٬Cy $[G2 j]Qx}Y,}ALj.9cu{~ГQDUmiAo҇X\rV O[Ds$I!v34e۬m)պ,yԠs;8:'ӔB6?Ӕ^,!syGQS#9VY-ܵy<~0~;ҲJM~g|b7NG zG zG zG zG zgkh?~ }QRn؈n3AAAAAAAAA hO^JKTS[+C""#kОvU]]Is穸DK.+.XamBviz}Tv 1|ޘ~h0n&>G 'KWh⋯QRR&V(k9L}Jeee ꔛ%Kh}nfwuuL ?U|OY;wVj䈡ҹH.էOO4l7 Ci*))? кp1TZb}O{~G򸖯XDDyv]wqzh{Mm/_Th}nfb]j=Zlg㫪?;dA{-JU֭:U:'2dl +!Bhdy?>Ϲ~9w';~fAA6naWsygq.'<`0|`vQ\Hxۨ^ ]Po bY- ]nլ{H!| NYh @=sP/anE0`6py1 }f[OrC!| N޿I8i;]16RTT=>̪k0 80Xz 0EEdg7徿#/9s{<^`Ǝ{.G{9""""""""""ra7OHbB9al_^Icsn6nqS~~}{a>_|$k\z<֓ӋBX${C~~vÇ Ys)))Eݻ3y4^z .E\5z$tzg݃GfͲس ?cϞR5чQ ,Ww^u}*-,vvCmKmZ3=϶mi4?y G{x |8S&~y'&&OvT#lefETT9Ƙ8##":󈈈va7>mFncXavȉ q=|>>`WojfaZp88GU ;w&23(ؽ{+W}(ܾRxٳ؈b"55Z4soUj~}[} {JD@0Ȍyٹk-[бC;g7#669)`ǎ|d9S ..:w<#""""""""ngGQ1O?|>7\?=zwo /9 x<ǧ`9d`7ޞwKWqgrYtΎ]̚kh43Jvc_DDDDDDDN*J*DOEE6EPU]CӬ 2IJJtTo妲Ed| o0 vu];vrvڍ Hgn$&&36~֭(ڵmMmlqR!**VZЩc;v{8;NCtґo[NavZau*+Xt9E7WjCuX6W )ӓԔ|[r|Ht88u`_v;y[y9>t &("""""""rv~*._|lacXp8A?@ǃ!&&F)ӏE^aGQ1'zv{ !>n? RVVN j˅˘̊JeT X0s7J{ǻ^}-9Sa՗4Ʉ>dtҁԔ/$"""""""r(2x ׌Mk–mTVT_PPolbRiMHHMtґY@m{4koQ\qcGqڐAA&NS IDATq Ϟ3WMeq Ҫe [;U7ŗ^ctґWtm/_&үo/u\_/iV&=x7۶ks.t}3r޹gb61s_Lz?O2*)-^}#A>EO ۏ`sy:'bmZyNڶiAmTVVjߞ$''`***nťM(bƬlؘG(wn :[Las5߯]? 
1!Ӈ {׎C_DDDDDDD7""#߯7nWUVa&.|Ǝ7_GӦY/J-y䡿loH۶M+ڶn}W_{aCad7b˸b%umFZ!>.[n 'NLLuuǂ _N\5r7.n׶5N/Lg;wG3յHֻ)^Ħt^My[X.zׯZ-xUClݎf;VYG-9!TUW셼de"]:g]è#?0|@" I~٬*""""""""_O?"\|}$*2oV)ٹsMqfƶm X,|Op1\~dYtN9_K0L\qNII&6&;}6d;V]]bWۍm7vqd;^4Zh(--;W>?<;]:H6ҷw7߃9ab?€=8sfu@mlܘGM***ٜoOV9Ifzj."""""""[D+/GZlAǎh݌Zd[^QA]\֮e׮=ĄNЗYqwݝ$ouc\z.7- </;whN6糵p;W> uVp1[?x':v 22b9DCDVKc&$B~^CLUU\vɹDF8*.ڱϿA$.Czefz*QIL&iM٬$'ͷ٬$''toONVZKbBD86"_UUlڔOm]q{SRRolDDq1DFE$5An]xǏhXRYl%o./ͳѿ_o.yq*\.7y_]|gOak C4iJRR"99ܵ CxbcLq|x)}b[3RCm6مm´6AX;{(|˲kH#+#[жM>""""""""?RZ "L߾6t0.ׇ# ߭mXlXuaDvvSn***$7w~̖-[y@=&:(n'PXXģ'Noݻt:TVVn݆Cl1p:<=F\.7s-"6&VoQ3G$5VvѽkGv*!w&X׋r40gl6 n2jj\r$#.8ʪ*\n!t:IKk@M-K\.%%e{įǡq9+55.t_`0DєL5ǀ~=NiY9>BMCVfq%1!m[r93|zBt6"_>]vm4 #=N'Iƺ\n*++qQTLQNk)*Avv#cwKɌ tXwl2cZ0L4h/:gn}ƞƍEBB<ֽ!|00; 1clJJJΘ(w̲e+yUuLKp8F\pSψN0g"eҥS{rޛٔ=Sۊ'L8aA7@^V9 N=⫪k Vqm""""""""Ia_+y8#8v ` ?'xp=Dsш󨨬:Mlt4K.'/^EvmϦt IjJm[8n)YXff|5I_|I||zt;PED88>+Xt-7ԁ}O6t2_O^o km >.>gbaؐtԟޥYL E;vRYUͰ!!"""""""Ҹ)2x ׌Mk–mTVT_PPolbRiMHHMtґY̙h׮ ƎbGO0DFFХsGz7qPJJJyO0 ""swOIN̿_|^| f#!!p-|a՚O>}30iҗ8x;4BJJ>-@F E.W.V_JkBllL]"#ǒT=)1ż۲ OٜzJZ4;k6aFCfRIMiR$ֆjN>90k7hѢQ7k7yax}A,6@ HUevM3ҥ# lޜrl14+n:3> qSjb޽+)Lrqqu0n7;6[2~u%K0Hk҄^X7r1m,L&3ط…ٴ9SO@NC~•af5c;5_ [1Ֆ1C\/anE0`6py1 }f[aB0 tW1y\v`o6$v8۶mgi׶\|}$*2aB&O΄ҽ[yC˰[~v7> ݍEaw㤰qQ(>t OJEt<Ъe :vlGf"N w좴v].v!..~}{w7ĥH#[D1͜>|ݺvb]]q{SRRoNDDq1DFE$5޽zMi߮}+JDdRR9}i =Tn>G0kXlX@Hc[D~b!:: jRDDDDDDDDDHO[DDDDDDDDDDžn { EDDDDDDDDD$)g2 8֋ٸX/)""""""""""a.51Ԥ㲶x,}<³uιz.99!2Bc``لdjrCX&̺B0 3CȀP(EB`[cL`1_yc0MDȀ`(lFq=>֮BǶ͉p^T㸭}\vvoeyhnAV"'8.I0  `YͺD [1뿖@0?P{MPQ8Vt f M? GovYd6P/ӡoSV^ń/qI?>;軓=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""aOa="""""""""" ]ȉrQ:kYC!MM.ENa X-ftPyklC{4t""""" E V ]4rd"""""ǐژHS-""""""""""aOa=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""ahHR/e/!,ffK dCV!d@(¢{^%(FIA ]af5cZ9>݊٬1CܤDDDDD8P { EDDDDDDDDD$)"""GM#jrJ0h ڬo ݎ->kllC#"""""n#hOW&B>_C$r0'$߫Yc%[@Ka{_MxO!v͜FM~]Mi ]EDDSo9un1 j6ivTV6t"ac ]4jc""r֯ȃM2~ 6; bjN-͚eq۳9n)O?C냤79R+P]4t"aWKF@a!x/b{[29L1XnzwK/8Er;ȚX|- xؾM IIٸjnׁ,JC7EDDDDD$l)9L&;'!! v$!!J̙agۥ,[ hՆ?JIN[@aa l Lit܁+p`Us0 }'|Ng'f̑Wc'#-nMT,>4\Hv҅ '&_9󣹜gz#Keͭ7+)9$lIxt>7/ɦw:k \$" ,KXо]εmR</kzHLJn%}fڷkÇy|.l?nCv}CCrYlMճ|~fe+g.XsX|5^@ u|͜džyzkA6mWs2u.Zʞ#Xdv;H<ԣZ_VJwHt~ؓSߞ=9@u5gLcϼltT-5g߳wC"""""""I;ED={Jf@avk8e@֭ QQl/ځpвEs:O>}K.3JLL_e:xv ?7ݔ_[Ql|̞X+&TWص~}嫉c{Q13A"uX0m Iґ#n;쵊>MG%golqquiLd$FbQ IDAT(Do p~d4JgC.4V^3`KL"u*-ADfJӂukn{R2|;1>`c%yP:[_}@M5gYtl|b?fLV UkV{ ϋ3),i^B3O+ö7_K|)% -qh {3{!sD$\K7oM![w._i kzf6/F @Ҡ!SP`Vt yP+IM& ƛ[@{oS^?} >dO4狈vGV ;|ܹ_۹+HLl6Xz e\.7nE@mˎt:4y[O8NBF6}9ͳݻG^/n/ƒaܡi'J&M% JAA![F|B @l tǎ'kIq%{(__FE׳7AItnu:-)ꎛ,) z`tϬ]x i\Dꇵ{Nu7vE^~XC=k ybX|uUZ0 ZNx0Z-DDDDDD~Iaq}*go FIi)7o!99eQon.{;s>^޽j_cC\x,nbBX,:uhgf;"Ǒ%*7^}D`MQt)[}>MǎǑڄ/>o_*Y0mci~MxwS|)`"!d5X;=Gv*+£lbrn!WVPۧHH2wæ>F\lqd^>Գϣ(xAB1;PzR>n=WU>ٸyAmoA|/,S.w#>&MR{j/k%_@׮|T-ZMyv2>l~ ߋ.ڑXwWIm2tJAޝއ*22[oRV6-w|ȩٽ潻sf3 Oz}B!l61Q8~_HwMx먃/Yd!soʖPokK>­D6oAŊexklƕ.ݩDͦGv!iSnמWTWbڰ'LgwyڝNϽDٌgVlq.} R2ӆ3;L"S\pQ.yxJ*RfukyyI`2ߑ4hȯ^jgh?u8`kl,H{ ?Tvt!$?qmRDDDDDDnCԵkg,}9383kbmuG|ƏcxDGGUX\l>7l&5%yN&)"fbSӱɸJ/d/S_$lڄ{K~ǹ//{ 㳏Z̑nۮ]ɜlWS2oF /'C(@҅_cb/ږ%ڒOb?=.6>(_'mDd(/)Aݎl0ٴOa!F0IڅcfF:M>Yגf}.k5HN~JY@s.LO mbB9-4=Vqo-`׻/s}?W׍%wW׫7f+)>K _i 567ڼ@=ҫׯcucy%=`(zC? W~Q""""""'0<6]vݟ4`%rxr:Lc`^_ńzr0c /<{LJ=<{qm>&kN]?ib2J8Vt f Y.dž>!| v/ .MP/ӡoSV^ń/qIw㗜8JEDD3ھ"""""""rv4 # ;[DD0ԀVOIaardd`qF4t"a#c.ADDDDD""")Zi2D‚%*fc7t"""""(9L <(Ͳn'$6KF]d ^E \ ]RX100 0olٴ#.%3C \֙ 0JlݟsnKN?բ_Fh  a2(k4Ig#2 !Ōx2  a=o4z֘&XWX!fy#2  a1-o~z~pnбms". 
*N%^qDZ^RDDDDDDDDDD\jb4I1evok68/|<N:4aXLج.GN׏j=o$>݊Y[@0?P{M F `[-3 M&6,o,\kL{߿,fNc 8zv0eULb?x IߝDDDDDDDDDD$)[DDDDDDDDDDžn { EDDDDDDDDD$)[DDDDDDDDDDžn { EDDDDDDDDD$)[DDDDDDDDDDž f ]$uĠ.Cig=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""aOa=""""""""""vHS-""""""""""aOa=""""""""""v{w^U}qs7nv%@@@D@PQ[mkV}cX>ә鴳wKP+[# vH! I>PcSHP|ys^    ,.@bX hT,2bXѨdLY.#(Ő{~5# ò2<; ĿNG HxG HxG HxG Hxx,U5 ]FM&,V.|>2ݮD|V) 0r8JnPXMjkkS8l}9#O* G|>JIIdX L8ѷ}9r˸lVaANN$yf-[B6nG*ͮ*5Bo0.jmi-}um򶎟V{[n 4iⵚ}JKK$G'?־|7ŸTW ĜSNN$) F+VZζ2^O'{Z-;vY7mՒWizS!]>]>c}scǫjZ=T]sRzr\r9z\k[M͂Ez;_TTpрC &MpA(}m;@>@fNWnn$^ҶQ!zGTTT9/WϾŋ?RSS$IhTVծ]uzGv:>x=c?XY3kZ6uJjNҒ%˔ysol?p8ꚓZBV$VaA0Il9Dz*_QOqk¸2MXlu$E7}fh)2 C|s#Zz(?]nIlڽ_sШ"$ïC+ %$ǭGyHƎQSSS7gѨ~ZZZ5mD͜>Ya={kהyghRmڲ]/.\/Ó0 eeffs|՚:Ң'qed]tn9j:mݶC' Gp(|ѹE fA߯[wtdҸ1#`51iv;Jo%ݦp8iZZ͜>wp W튄x./n1gU6j|2$,ǭ\7FYYݰZHOSCc%>v섬V|Ӝ35k4%eg_RzZ 1Dva7s]򗾠CIL&C?4rp]WvnCCR^nKaX4x{s.[VSp2y.wxbQW'O^SX-'s3uwYں]}K+v|L&vNN{A$4k"{]zqH$u=}ZE UTT(IoJ?)^t8# ;tgt{d-oS:ytךJN)559&5GQkb(';S'y4}R jqz=:vJk֬Ӿ***ԌS{u_tJ=V$ d9VC5% W)Ѽ93`R+K4*Z D(Q fa.= /(:SlBF%ВWh땑=:tZ[[eٔaC.މj 񸕗OÆ[ZZWviYI4*!&lV/Zupv5 f3ljDsv)gZxcd%'W!1 ~-vMFjtO<7_r|zj.z     F{ѧ˽$rsn(IZrc+Aop:2qN6h.vỉ>?x"gQCbĻyX:r ]7ZnS,߬`0rcvM7\x^`6 y4vp8N1ڼ}Wƻ~}5\M-]>Cʆc% MzyNIn]-V|^̆dx^`YeU/S3$'yeYT2 Wy9. -'ˡ% *$ydZ4rXHAY,f< \_% $լt٭1pyrح*.Qz/|n6ƻ|dgl< @og7 va7 va7 va7 va7 va7 va7 va7 va7 va7 ?TLv,IENDB`python-elasticsearch-9.1.1/docs/reference/images/otel-waterfall-with-http.png000066400000000000000000001262421506101734100273710ustar00rootroot00000000000000PNG  IHDR~P1sBIT|dtEXtSoftwaregnome-screenshot>-tEXtCreation TimeTue 26 Mar 2024 02:10:29 PM +042 IDATxwt}6lJ ;BJeI-Nb;soMI/\ȲdYHDM콁 @D!ޱ}$&r<33ÈD0ȵR-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<@(DCWO0`0e20[qX,0.^ nL&M !,&#(N9ϵu L&17V*+b1$Ծj::hmk'a; vy uz c00,v 櫻~=^/MM-DG;qE;sEDDDDDDD.Ma笸mեl<َ:^Fq61 LLdvZVӍ?5P- :ek \gTuWnss+vcdf^S?D80c$bvZ=,xe3g:h㰐ddiÓI]]S}8/|.d]}##)!GykjNWxy;tSb5<|̙Upiyåx*j719lD ǑFl(C:,.)緿wg]Nggw'9)qcFY^z ťe9ADDDDDDD"I bcEKRk?6UoM=) ]|Ǝvahjn5n8qkoavm-%k' Z᣹GQu3y|vy)1׼[Im]Cz93 0[̌wG)7R‡t8C,T5]iqwգu1f~|f\h"""""""rI7$޸c%Kݸl*v / ػA+ݕmy”k{v䏧 Ɵ$_]3YN˶׷p&ܳ}v7$6 >S8,FR\Kw]6ЙF++ɑSI8Le5g*鰓BFz*&ӧ#iKN; Blܼ Ci o.6230_su=IKK;qX&OӶS7>4苔横t%-R:$2.9Je466a4ÏaW]%Sf7+Z捍D$^UIωRмϹ`}77bE1`p;QtPGNJt3`4{-m,)htR' ?!7$2% >{W-pٷyzۆ?c;iyh`8Ħʓΐāg_HgW@ɄagZD[',)-烏>YwӖXz@ ;ظe'!L&#N'sDY,Wt\MinihG`a=aw}CϽ&5u|>aZHLˏ?Hzmam;"`0mL˟Cw?IPsk6}~Oxaw(fgQ-گ=8]΢Zg hq^|~?/Sx<C۰YL;XBTԧ8[S^|&|>?p( ɉ 7']emv>{X`&S߸XWǗߦ.p8b%(▖6^}]JO 0L1|XKysj (ry ˈwZ& y6|ֶώ5BVf:_~ܱw55um>::ܱh6F?[Ǯtla3sd1OWsI|'v FK\5筮ya'J@['5'|e?.^#Gx}@qxh]6줶ʳTT%6ńB].Ow' %1sZ>FlWr:YzOxE,"""""""놄NaÆ;yP2FPzaoa&Y-tzgΥv+~">>ˁY] ?ޟ3$7'<;vgGlF{{YY<أܿ^J8rUz~6WP Xv>޺o"2SS[φwц-w/d4h,fM'=~~ zF=@rr˖,"5uY뷒Ǵk RSOzGXŒ1X>2M-L<)aٳ0w?_~K$pD kon2tH6Mͬ^1 ##3z8?gMߴWNxg;<W6=P~ ZK(⵷d` R'Qs~`ItuyKoSWȜL2]x.^|u9/a6E F K_WpY^gŧ=j(sfvrI>޲W|;Űȱ=fuq?s/DEu$oߧ3 8npo!/=S|{y6oYv?r_765yn, iS&~1;_}!(*.c-=PHސl^EG(߹&:1u:;6AIT5ٱr(n7#F 屇˿Xiko{`ZA{exk[;cF csqQr +XicܘWu"""""""Yn/|a6lD{{;;w P]] =k4cW_淴CUU5S#G` ?Ꮸ'6&$KJ7o~^RDWwPVv:@rR"c=ozZ i)sAƏIRVp~ڳ.i}c #-{Mp#ǏUjJϏtob1G(b7y,?i);lYp UgL͟@|Gwgញ8MF#V4DT׾lSUg: y dea1Y4&I ˏ4+2ٚZZ[HK=K܄XrO>NMl7ߥt'uSj ڏ]Q x(|}.-u0q~Uv;s.FEadyZzlb҄1|{{BrX_cڏM/fxB|fq}ZĄyηb+V磾~ں:Ǝ̯ͫsbb\Y ?V_`ׯب74h̞>um픜O}9NΤ {BO$&1f0[8U^qm*k(?SOJ\NCqi911,?'0M,73U7,f3FBOGZһ0z>V`Ϋ9 ?DEY3s -6Ra0h$!hΎOcxLΞ>zzYz^` ŜYF FfOCӧ׆hd}svfopߒE}F95I֎ysMW'B +34/1$'%Ϲځ]e4Y0gZ9Yde̙ʳu"""""""rk#V+L&#gϞc!n~,;j֓=.OcSq+AKK+~ʰT%Xr2p:NHIy 9 RHCc3gkQ[Hɩӄa ao "/'_?KAxF B;W'M465&5eP&g:fc_ysK 5 zBaZZhmu 
TWdI@+ٌ=446 qq9n B VLj6m44^]YqEsDžF#@DDDDDDDno7=Xt /*]lذ!Crh4R0e2nwR#vO4M> GvttNl?eb2݃+Er>P(tـ ]z]xa`tiX1 q a. Jt_߀G `4p `}dɌ=ʺ{GEiYo446ц(*w ERR- #F_y*/u6v'Ogg._naXvbɺhhC0΂/K1DY,9,f3Cvf:wp;u}w\S>e"%yawJ[GH_RY]۫;1zPV+kT;N+eΌ)tsv>@EYLjs_y{@BGʬL0.8x{WIOsmO/Ă1DMfSO̘6S'tػ˸ |݀nSk_/ѣr>[|#xカFC9DDDDDDDuKqqnw~?@I(3224ibͱ%TTՐ{x|9s3a4q:8v>4Xp{&=u0 q6>/ ^@vo;D(:|aCs{<^^9jPЫϸ,mS=R\Z]o}[-Ql63%?jkظq . > ;+O? ?S455cX6+,O`//_,_W<Yz7,F_5+An2>ߟtXq ][]v:2$u`$ hlc1 LLVRʹ-pq^F# ?6lѧ&ɒ2^xu9̲pwPlw F#Cw՗X,faFM_0c8fS݄B!;xY~Kt>Z,fO>z$o.cE=}g]Y]éOY!Y86oC]}cg*鯞e=Nn$kj"Y0 T΅.50,ωs k7l5qh8ȱ"Ϟ#>MF֋MwK?ZZZzF>أ&'7g{3uJdy!fnQVV3gxꗿjx̙3ۿ!q?k;'GN#lesOX"wl, &MW?w:驃9W[φ;8{ fqAsAI>ru1a(2ӈ%:xwyW?g驃b8|}F^-FL-r9IMDfF*fٳ0_GZLji6nAKkr{.NjJy7Ypɉ񜫭g3ihK\ ,3ی'#-Ԕ+>yYx|vI1r=Kz111Ǟ):y(cȩ N2rP<;DӸ1dg}Zn'%™j֬Bdƍ[Fƌ`-W2gf/fӖ]  n0Gxh2RtM[vÔz=wV'N+);]ɸ#0<[wJ=9vpw2`"rl߃ Fvd3y6o .g$'%P~usYr IDATx:$׏j!J:]3u?:ǐ7Q qbͺdeIi wFb|Crzw:45^g1q<ܽxnQ"""""""r\nwl,Cø ݥL̙Ei) 鷭d_y\U0olͯptɼ^yuICC;ye7^f % u]mmg5·&>Q&EfɾGxUAL&EgpߒE}{,w!˖.掅ȟ4ֶv6o+o"b0ZJ/,l>!;&yn^}]׾HLL4 q7/ٶs/PلagL2gYS:{2~ܫr6'{>;q^y^q{ʖN׵_h +)g瑗re% N% Yz{nd2чϿޫ^<6ney{ QQc`Aɉ̞φw0?_l6+cŷ(.-hpL+wm>6n tu2W_{^!oGvol&&&/?Æ|=4rQv9`n2fTw9]{ {Ҳ3wtbIZ ֬BRB<ǎrA(/D|aCrpN9Y\FEY03NtpSe5ϔz},)` HLL4y9$S45a\hr3oxNrq}sK+%e46b2I!LE5 ~~@>@SgtH#++9^Tʨy}|+iyåx*j719lD ǑFl][[Ϟ A ρChkkgAJp{z};QLm]N'jwϿƙ3_~pNN;ٙ餥EDDDDDDqKOz}JJJho`gHK4]n@(DCWO0%k20[qX,K txx|A[ <"7D8LKKpɄfc0_go o """""""^;+ѣfwMJ;hmu>e4p-7< 6Lϯȍv a 455rE3axƍs3X,m6rs-""""""""""znzm4ғ_``֬̚5s蕈D[fȵ0^ȭMaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""OaD<""""""""""vHS-""""""""""|; """""""""r:;;?/3bny6֭s8v'M{&))zv[#"""""""""^'K"!!6`۩'?fRWWϡCGh&yy׻r(ijnf̨?~ dT<ώ{x{XpIttvu^{~ͯŬhV3"""""""""" HKKm4765o+ݱ=23ҩf굔f!׫ra?H0景oSu|ݘ Ϲ2P-mlش}HK;瑝ɤyREnUֻx<^{ 1!߶PӕY3UX,Əⅳq}=7~v+݋琑>κMhhh&6żS)<ϭ". ٺ}/MM8vL8 ).-Ï>vi=s ΋'"Vp1GugyŹ=#?t'O1lh.vDZZߠaִ|FBCC$7;g~yD(fæ\cG2`.- Dݏؽƍdq$DZu^kj9<EE>t8/Tfg$v=DuM-FǠ_fEɞ}ٶs/`F|ѶER/5Xjղyv87v0$&&tN77z鿕[Z[Yr5Q ?ܜl&O@W׮PQQɌS`-yCr |9@ 9rMͬ^ɘ1#}#d>z~7lOLfAl8~-,9<{+SV^rG{84~4<:;vgHN&vlDd*j8U^WxֶZ׬B|ᅨpl~g9Txy~N={H`00n~gvG]aڔ ܷt`0l⣍[ɟ455s9yb4 B86_~o$wtz&R`¸\Ѽ/f7DnMͼdi%z˩gHkk6FQ%e475S~ .j1W]{3g`XGVZayDbB<@ [̙=]ZeZbb\gF5j?ū˖`@ܤh~d>.v6/ٯ/dMG OLh($'+A=lV̝˯a/M8&#-aܻd!'ؼ}MM-dgsߒ  8VTºۨn2~HΛ+͔wh'GO^mgNE=%K 6SL+G<^/;Φ'K1,?Q#ظy'GBL¹3z.6mtuzsDz`t&Q-HHK3*$ MCcE+?]IGG' d21vpnGY`VB0V; [oh"3#{^H}c6菉Xr30 aϞc-: LOa] HKrbb8v2zFXB0~**ijn垻3 =56kkaŻkw{k6RUUC|\XnBeU 6[Kϸ1#0M)ODbfȡ}j%' 5ݯSf45z&jܵh@Vw?'O++߰  Px8~G|Z ZJZ6nމ;6Y CNp XjXfXrs2En%'9zf'wP8Lcc3. 
&HNLjLe%,^0 ?G"-mL`ˎr[O@[{>1N ㏯wŋ& A`sul޺Mhhj13?IԜ'\J(bLu+o">9m2#`X^UﯧfRz />}DprL_#=7z.ZG-;2q(LOxw-11L c8}k7pؙ^0QףH£x]a4/2(9ײ(F#P$';^`2Cr}nh+?dr {GEÉw3& J[ 'ĹY4&ݥNWTQxGXJNv:0tuu֊iimÉDEYX4qn0Cda0讻gc2] .~(f\fd42xP2zclfٙbfMϧF e ƌ#w>7Cf& ;9EBKke}w |Kئg_:PSS# 3׏GMm11zNn/_x^ry%b0p3;Ź^Eõp(SzJ1sw0q(TU0,/.ʪ,^0S&0vp&t-cH[w1kF>yY:z=1&c0Re3{ # PqkE"n#--뱱1ܳ.vuyɩ._=˯IJ zmlҥ\|)..g_VrE3~ݻѣG xnw.;Ჱ> O=UI縸Xl'3 N,f311nh4eͺ-wll#WEK(%l6wc:fb2sc6w1L8v<v=]9W[OW8L$Al#'cr] K\xG˅n5%C(-bYIJJ`/* A̠Dr3Ht%;vްef|Gjb6189l6,f|KجQ$f:J]}^M%r lپ00kzZ*+3=gfkTɉ=LF\hpXףm$==NG{GO-2 DY,r8V!]Oylظo|n}Enl.W4]]BPL`Ktt߻W1#xh/@ffsgN!#>?e11ݣV<}aY1^h}rWh4Kغ}/{fæ8FcVn;~M[vn6]!h4bYiiiIND/8 LυrE}wg!V^')1F1i☞"S&0uGx单ǻ l6+p>I[ZڰXL}}qO/HwclܼNܱ15S&\QQ[QQI[weĸ>+ a]"gë>'2`%!l!{9AQPu[pjkn8@AA&!6;uy]}r:XZfY99-x e2d߶۶m]Ԕ7j(îʊJy}v$g#ԈP-_Jo=VuܖgZP2jby<#}b1+:.&^۶RU[Kiʗ3 oEIOK׾ڷisceX *+v{q6MrVYB% ꇅKo5t uᄋ&%ǪBKѷ?O[ɤͲJ+*5o6r ժմos^O%ៃvmڲMUoҾJMM:a˄JKIReUZf|9CvU{tgҗVyyf^T^QҲ5r֡yp2QzZ֬Y2k/ >tr74+u˘]v$MS][ZhZSRßFILW=`lІ @nƛu|XmGc5o%Z|eg6Q1ՆaڰiS֊p*>.Fm[jUQQUdzɁ UVV!ͪEF8պU ^Aڷ}n]ICwZj2S(EFEjʵVtpbVݠ%U^ݎy\ PYydZ4""4c< |l6V.UCeZqZPlK.]zQ_p&@[l&LZ׎Lڵla*9T%!|dyNv2).֥selE8  Z^ڹ}vbur[4SlK1h%$hG}b7-|le*--WJr2j<8mzvcu IDATw׋zM}VM$iĈaǯ][>D]4Pz^Ƚ]ϗgPj:_v]@Pv~"ǣ_q&S߰Q) [{dv٫k dZ4svKR$˫f۫{tLٙj,K}2EݻtІ[mN]pmf2qsf͙*+%˕۲mvG|x]7bl6.[?QZ6WU[?,\&j6k|  INح;vKPYf7I.Wl6-XZᰫM9v߬ϦLӥSvJKMVi%CJOO}dJ CJMj6nڪMrMb\+p֮m/Q_|=[W|OPbbcԾm+&{@oZA}K.qи _(ESkjݪyO,kծM+Yfm),RU[Yk{ sԶ-[y6I;$ٻi߯.[igjjѢI#0 ~оM͛ ^~IiJxvmn W4p@_M>K;wQ ZnU/W~~I94;/IڶG]IDFF+/ь4k||>%֛FYvF 6_EEFh¥=w"uh}<բ*VlF WhҔimj&O XWks9xJLאcׁPaRJMvľ85j".ժʪ^f S q1{/QԿuP/4krMd^TG"#uuWiނ%Zd֭ZV?oѧiWշgWYNEDkU5mwMOLZ' JZYcv '.6fDjeaL)A9J2@E=R}0PqJ힥]I!J >^T@7PNKUt[ ~w7OtS^jL+Jٽy_X6WԳEclX]|}} ci)JKN%^˩dQ׃ڰKI JJt8G=~IQ/8 _!9VL8y}v<@HAA9l<@(AymY,toB)4eYd=ǃ%eqY_Ǒ]8gva7 va7 va7 va7 va7 YC]=/뗄 N4sB]nYaaaaaaaa%I&EEE*>!^3?MڰRO7gbIRjj.]~%qrֆOL_>{O>+)gCC/T&)e@`z}=E鹿S;v:s3~^/Sdq"OzfcWm^YUE)==MfY Xz`?c6 CEsnĸt-uِk\GeU܋ WvwY#r:9n_tm恰enKr-2@! Yd6<dYd?' r{,2P5jp=нΝkU2 kc;5o]H+(PYY,rYq<˕Xt?j|\qݳw`svUyy,[[ u/G+6UPRk?o=qƍiWV=g}3;m+!ݦzonlVYi3ӎ]{dXԲy.R?#l$P֮[G{BC.Ny*(ؠO?"8<w?TttnaTLkHd4QFFUV./ժyh׳4{|m+yLbc]G݁`P{ l2c'~ ЂE?uq{j:W#QNtasl>SBB"]ra?ꍷ>]{jٍ몪 ym-ܦ?9%''s6x]XxyI #hx>OSe?Sb|nefa HӥP }Z*sI&I9-i-ZGux+(..VC\ZԴoW+:Ư َ OYR-MmÏNii~So׮Ў~9}Ҳz~m+ڡ* qgΚigi5ÕcM>KM;oE99-f B͜9Gxf~U^Cv.sc\Z۵Eq3gL& W23uɅ}Ըڷm%î;O>؏>UZjN2sd+*J.*Ӂ-[ZMRqS-Rt:~ڢƍdԸqbu+Xv+*eL&ZX$ɤbN8 N=BY[,_JE8#t])4կo/=r:GlTWDz=d6ep:HoW^v~}>k4ҋteuLTT6򋻴k^=c:xyzOoGmnS4clIRJJ[ތ_T5IO=b[ZZ"ڻeaZ5h@/ KWKǟN\!1fI&YtTm$0޵vګMRLLR6sZg3n48?oQu6=TU_rZ4 aeg^ƉzGԸ$bb\# ofWPϞ[n8ݻ+h߾s׮2(^ԲvܩkI]&'v<4]ݺve+U~*SBB\Q#.ס2m-,Қu5YaRvgѠ=THt ݮ」xbUjJRSSN~1Tn3wޫ_rs[*/^~?ҥ2"l;}+k_ҋ/<[_ x=lجEԿyYY~7N<^Os_Ҳr rYL:܎$h']6***e6HIԭKGmٺMK͛fkؕj=5ƍd2caAI1hegg***###ؓ&I j,)3IʪJU/y6U@@/Ծ] ::L?.[ W0ԏW!_^\5|֭ߨIOyu_CJe2+U~q(-X/]Cu3u^vTT?X=p7\%U>գ[g9^b1+55WtRn?͚ƍ:Qtt~H{a׀>k`8zTf2,udՆ]JJVRbC+@ʫc5k}(]9tpKB0 CymOl'kG uY8 /ÐvhP ha /fR frLΌ`Аf,~ije}GJgw*6Mo  ЧN u9#nGr)/ژ l8N~ͺC] UNNK5i. a@[Y6&G {G {G {g2 èAWmUC\RB]2>MJ>+*®{}?$Y-|50(<@C2 CYPゆ`Аl!AYfy2t8ӱM2? 
<^.تֹيpoLTƮ4#=~IQ/߲NaH_  9얳 AA9lI׀ z,0A`Аf9'L,)S]:c7C.3aaaaau;Jn{Ie@qFǁ劰uں%0eQ 4hL=n@#=n@#=n@#= IDAT=n@#=n@#=n@#=n@#=n@#=)--/QݢI)iz񟯩p[JKt$eNQ}@֯X1֯ߨO ǞTZzZ]p5z}5fqq٣vEEEJߝ<Z@h8\-I~_ޔАK/f;y^OSJn~)] Tq vGEEFi߾Zp^{-ۻ_cn[oO&LReeuouYj/I\Ke ;a`P0ak@m4[3?=@%Jt=w3|z/ISt}T{㔗N}x2 ad u5 (4[dpk銡elT?vqֿ>4UF0+֣?@ vK[12b|>}8nz|>Im]>7A>OC\~9%pimL~.11A}ƍ[^yC[(ϧ#_L vKgxe2٤)3-\TnGvMÇ gF7"xqt!jupEFFD@M-I}H G{UrRf4@U-I6MM34;^Hׂyel@NՎdRSd6k5U*++WbBRenLniuڲP6lRiYUcٔK-[6WӦYjW'ۯ;AŸ׿<URrH{%-[\>_ڷգؘ߯Z A θ^V|Y*4bad>jժ 6c2M߯.rڶ͗nQ-ee媬T}fLege*;;C;cQ\@yhS$=MW ̌&JKMQrJN%&bT@nޫvk[v-]\yq]5ln՘ҬY~귲X,JNn\ZdS??&&~-_]KvJM9,[HUU#բyRSxlB |4nL&MVfp%5nt-YjMzwG5%[5><U'㝎}/`n06 C_~3[3gϓfQlKWZnuWϦ(;+Cyi2dY?nϧ"`YN[ !өҲZ3k$Uӆt}(9%I!8y<曮SNeTYUgj$PE2M7kMWq|*y^5k/W^=Yl-қaY'<0 -]J3f}w֥Sddݯ>ӻc'c88 vSC.Hݻwڵ}ܹK~\!w[*TkX/gSIRS~~.\;UEEFaf}z7i@>Y|%5n&!á}{)""Bmz5)z퍷Nnimzx2=]p֨%j߶9_>_SOv-ZBڶҰ/|eIRR#>Dk 6|ק).֥&iZn*+8Qm[b1`mݶCae,huTk-[uy^Ƹ\k*nu*wݻ( 0cM&,fNNL&VZSo5,6yyu#=ig[Ai=;UBB|u-[ c8Qy 5jHfY%_}d5M\k'ͦ{Jmݶ]Z@XKuҠNx+..FVU&T^^{׎qi]5k+gQfΞ}+&WtKJt_VjJza7:UVZKq)Tf5-LN$!!N_4nנok[vUVU)::J&ffsד?RUUU3O(++z{DS_7B//+/SBlvu|Ee^l73(bu42j588Ew麑Wyg_iyJOKwQdd*;ה3%4quyz`6ܵWo;^fkkpԩI 7)??OMd9%&&b޳G;wֶm۵f:Yjt: c\&iZV+' Բe edivZ6ohuX扔*SUe[]6xtj6nުO'w?I߀Hj[d2f)ES`tnXy~Ÿ\j<[f̭>B/sZj~qZ.8T.?$0la?baz r=rd2i>F$vACY ڽ{rsp8ɤͲdrOy5oYr[`R}8~V\n]:p([Hjl2劮f) 2JMMVVf1GvTyj\I~@i_z 6h}߸Ye*V$W}nWRNfmܖZ,z.Nx;IO avhDɤ$9vܵ㘎SnjnWӾa~԰H%WbQ< u)b(99I{UTT Ж-l=#5t}W@PNѳRTT_q_<@{׳Otǯn8|CM>[Okt~x$M>n =6jG+,,҆ ͛z\QH-ب*m[o߾cN%Jt:~+3#]PYiy]o酿 )7bb\ʐ^^OE*--S Zf)22B)/Xg3~Ayq%6Jd֡C4ڷtSi3ӵ#P]; _7ӥPdSK~\sIzj"I%%b(1!^+7\-\Dc?9͚fk̍7wZ۵fӖBmڴU_5kG~sȈ:VxWH XW]zwWmUvf%IsY,GhiC1#];^ l6չS[y<^Al6pka7:cTVZKq)< JTVZvpdL舯[h/>?l??12pթSuԡF5*ժԔ#EEF**2cfbc]u5f7:jPJr㣶GGG:raN٬ؘӺ.@] PgF_>6lܤ?$0lan!Ia* -ɤzܨmCx7'nWԹs|>^N[6oUZZmE|G߲EsĸӢ6VZ/@lо:oRp0jaaZ=PvTTykt @Pd j`P2x 4d,L.8 C!,$RAC 2<!epc1d>@VyhJ|͓t(*^/c 0zUv0DW]/aw}[aW(hx$騗#8 _!9Vf!r-gL AA9lJ T fO#! r{,gije}G^JgwA {G {G {G {G {g uNVy&%)q7R˧2af7 va7 va7 va7 va7 va7wTٙ}a l*,[41~nnL&(MD# "H]Y֩ Y_\fs>sg:111111.bIgv Qeij [0Q0rdpce7 vba7 vba7 vba7 vb=$:565)f)VbbNqC8p8|y;tȕPcc>"0NYfUzZ=g|=a7#鳴l UTQs_qnrrԧOo]5rujuCC'WN bjSy=#upG-[e5b`dA~^>XXO=w|rrfyO8zz$kb}o)l%u5cl]}xu/ڲᰫKi|A mY?ois G~@-'Ք?Qnnv9=]>\Yjjjsqo]>/LI`P;v+HU~c‘=|NAhԈAק\۸.nhc,׺յkZݒU4t7jm>|/]5k+59YRIǛOn+)CF $á}i9+/;7a7!߯U}&}厛yq>WuC\E"vv/YRw]ߠF=қ'ׇ+?VVFvMb(niμElPy=pXǻ{hC`PU*I6穫K/OV 9bsG.r9OWfUϒb~Ie];4ܲDVc3أUbbq$$b׵fUrr‘ЮJJL8erw[:d ^9vd:[%'%fQ)JLjj=ZZ$7^nKCma7!áD޳Wa s@pXdJrv:ڥD)6+M$UUjz)ũKikN^Hnʔ$$'iWh_tfPJ@=!ǣ\8pHlmf)33C9͑ajn+91I#J[)I>tNV~Ikؐzޯ' O6oճ/msڐ޽zng^Ps&1UEEE1ՖtצjæYYdQ8iC!546)UiIgMyLOӊUkU]Sfa7!Ţas'h}eo5& YsҰacƮ="R:*!֗rڦ͜)kQuz)z(ݮڣujhhcF{YiՓA~[*(ӡPrv3C m"ki?E[ʷ_V8jVNv=п <|D>= 1.dgaW}GTԹ@=lW#k}g:i~*)*|D+V֓O=ÕGtWn9i s/)Ԥo>n]:$E)0eZeDfR0v"1dZd=7d@nI*(W% h' C:mB\6J=xRRP=T$u%F E Æ\V5 "`0"&]> `)0,&;#΁vS_쇺zxfg^o5g^id8^?x!n۰[:߸KW^yB^}}}B!In]^}}Br}wѺ.0JMMwG.KO~K/vܩoe+ t .@>얎bi3hr\N:Vt,&n;)1Q>EGUbbn:|ux]" Jb&$'Nw~FCPl9h %p Oym2ǎZ66.s+pa Bڰqv֭UW_]{ Z8wTϧ*,W.;{Xz*!rB vhÆM>]}` T\[nK6MNSe|0D4w57eZ4j0M Q&+jvޫQ$v)Y#@kYꐛIMT^RfVnRSdZG"סv٫?^৚tD}]"vVUE^C!%$xUZYiɧu| Ԗ;tP"RSSTZ\Ą6kO"uU7x2N:f)3#]QTynۮ^z; B[.?T TZZntJK:zھ}LIVEHDysu+4s\B!|^͞3OoM*+Ғb%$Q TQGuuRU7n֮=xvUWWV~-^ޜ2M[ʷ4Ғ"~ۍпq-RYstAjİ!k-ěmwj[Ӵd UW(=-Uխ\r[oݶ]y1eff3ϾшC8A]A/2Y>ݠHPQBMnF *'뫫כSw*uoVcnjV\_]{4{B GNB]_(7'K^o B=wwh4z`\NmشU1]/2E{Wv]rX O6k}ڿ|NfkZlJMI;έ̌t#UVV+WW&U-Ͽ^ym tKeD"x:.{lc})/nRRZ? 
q۶jhhCDj}5ͩڹB $[ݭ[IZ9rD5#"/#zTZjq.^\%EꐛsܾzU*u-4UШ$\BWwv,Kh?.w<848nlV.g*hj` Ps_Ed?oxߞQbbtQfxu\2BUM~cqsAIҡíj_pf;W;wU( ߲_2[Odzȥt,JK[p8bab"9]ah՚=g}{wvtŘ^xe|QGZVٟn7-|֭U:BtժhŪ5Q]U֥Xi)~~v]*))7v‘l٪-[UmU{OK()INSESIQ'XvMO=Y2l =ipGڿf̜3շMM0NVfn;n=N:dinn/z*/JJT޽z;koLO7iԷOO]wggeWdջgr3ac2SO44-_Ffu1!@fԭujYu7#d_˯(++CI 򱗜rՇ+Vg2=猪 ea2igٔÇ(<2 Ъ۳ԳGWM:͠; i}JIN!5|H}7WIq,].Spݯ9_׷yO7l9)T]SsFW) ΪZ-XT{tEffƎDӁ?rsBy7 Ҿ}ZVWW( oߋթSA7mQ(B4@[ʷiϞ}[:}џWBs 4fF;*ꜯ&^~\h4e껭!YA.S= 5G52P]9"]ԫt 3nKGS?>*+Ғb%$QNg**^[ʷjUGO\K|fzOӚt}mW$ё#GH_=w몲]rǚt]v(ŹDT]]~s:TO*(SjjN>^VU*11AaJ='p W]__ݯTYVUWUokђ;ҳz=荷f*!+D ohlٟ߳ޛV>=SIQf;_ԐA}eXdGZSrÄ}LiUŹtF+>DIIHOksv]rX O6k}ڿ|Nfk|f)5%Y823UZZKFTYYV\^|u\5r-[RӦ]Qs_>ݠ}ە,Ir\z;跿ZprrԬ6Fw~2%I=i@"=OԩSoUݴisSa[?}|O{uжm;U{zF |ƯU]}~?Oҵ4r@IRuQ5654Ml6 :@>^Wb(--EW_5F{ukړu-jނzTl]:jwA5h`?E`ENS6MnK.KnKExVu$%%g?}Lӧ՚O{zo>t:[u7wf;W7lfSn]>}l%xBO޷յKf̜SZZ8^\} t:u5ԩ@.[H$Ν #j!َpf[iUfݟk5K)1ײ׸Gi~ EsƟuu 2 e$efo6IճLY}Qn*\f"!ݦx[ܲg/UFԃqAt|@0,Ӕ\N|ɂ")&yvC6V]s6|A@DT_W>=CDŽ"aC.MVu?&b #r:lxTMa ~ߏ5 afܱKv^( _\Y >uRau+"ūp.v]s8ݫzRcogx11VVcgt. !I|aH&#D Sifb a2 S6UnH SlV܏@T:جY_́:vV]SP{\s4/{[|Sb\FW6M~6}QaI&_FarYDY0Q0r2@,E Æ\,%*b #r:lmD Ô?a;\S[3ꆫF(92ߟ211111 IŪ$eC6ahG yG yG yG yG yG yG yG yG yG yG ygv$aʸzX,1r9lZ-.E C`DNM6kh2 3%1111.ə;+nZp8Kb|`HGUSs!ݦxG O  :575+blz㕐fk{ӾLŝxdv.[>nPbRc5j޻ԩhڳw<8iȠ8qROx魷gÕiϞ} x<ܩP# $>a{!bDNzЁ@@l 듵* k̘ѭپ}yڷdH%'%Pe-^-[ʵxR)=ZWGSoSQNI%z}mظY{IRbO?íXnf7Oݻw+ƴשK}}pa#bDFF2%&&l3MSC /{=sb%''I*+G?o8X}~] >_˱-ZWo~=˷Io~Uneݤ5sh]tQOiqjZlam޼Ms-t8ԳG]6z2OyOmڲUVUg}x}UԹKa7,Ғ"mݶC[lD4yؽG#G ~C>ձizu֯ߤ jb(hJJLИ1[ݒ[o^>W*+%0 ?f]e%KM?^7zr3O:G*uE=d~v;NHW@s:*,,І[TS[+IoЊe22ީ{H+W֨I2eX|"S^=p۷֢%+TZI|3 IDATnLT%?^ u7|񑈡Z F|{ vv14Mf:Q;vT>uU}VvVoߩFys6n٦qcFE˵g.dzv+kGAٙt8嶜:}CZN~{4lp_ _4Lv1nϞ}ڰa<8u X \S{T%ŧ+_6m*3Ͼ+/Waa$W$iyX婤$+(bDZ%&&Qi6}f;G^WYYku" Z|4'[yJ`Ps-R0d h•j~<_? /UqQ$kǿjG+1'4rӵg~eY[ԥ蒑CdXd :n끙8Retث[mX,x4d@={e$mXpZs7aji{ -ZTs@vVg6lܤz&aZz/]k&U9'_Q_^V"ͦ{k'#=-Ճ,=JKMV-MNv"$a׆ѭTnzt+=@HOKU=p8dECI RZ쉋ծ=5w=JLJ1)ɺ+5q8mܴE=xs u5WH$s4x@ wPa~V:'76mު΅y']emZObߏ4MIKw?Lfgs+>EQXXoJHIjnO߽b*57!ꮊڱcxZHZޭn]^̋JmV|VoOݻq JJJPmm‘mRfUܹPU5ڲu>YQ>\hK<|4bfV\\r\' %)!!Af}e /j=K6nڢ?S淏VEYx[2MS6oӴsTֵXw:餁Zf/\ƦV~'[ΎD"jhlNybH=߯>^O G .t234>]Vc˓dru*,ЈCZZs$$|_eW0V뽹UP~.>W%XнMzշԥnalilj k붝-jjijҕ =2U\T"Z=kZncKk `0$~(8=1S\듚<1DII:tR?X]{r9TnNv˱9Ya_>|X8vWќy uv=׵m{^sjjjW^=ԋ|^px$'i M~#I6vܭwf+UMQUUF[ג6=>#˥_}[ }xU֑#5s0%|mvna7pZ1|/}R۶кO70 l6{Խ[W=}**tܱ|LSۚ6}LӔnW|G}.=􀒒u^9NIRCcS6MFJ^V=wIZzV^F궛VҢS*=-V'JJLh:p(;+z'Nwz.H6mՑjeiԳ{>'g1ݹ~exʪ ?p3@ar7 "\Nܐ50?_T=j=Fo޽xTQݻu$ P=ڸqa{TX%z'=oy6-]BEE4l/\wE Æ\VG "`0"-a ~ߏ5]X‚3:oIIJJNZ8[G* a7 vba7 vba7 vba7 vb4M˞g~Sw!-r%FX x;Ch\_Ur2TWr ZYiJ p':\Ur ZA, LKV~}GFӊ7jGhvOU&sb>=ۦX\|qڃ$\.zw/R8v9-;#YqNuR@hHKT-tEXtCreation TimeTue 26 Mar 2024 02:06:44 PM +04R IDATxwx[A"w]eYmQęN&}vfwfgw;N&)3cqIf{DDR;eX4.Yre<q# Lw"""""""""""7Kay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}N7/Qg @C{ af\;.N774ul͹ N<^?>لf&ad|>Bu-fL;{}LG2zPRh[6m45r戈9 ?g.u9HLEXF%d0%5@ m; ^3g*1Q׵nCC[weؐdT;G0iOaDGGޒV7yQx o@JrtҢ? -y+L\L4x nUԱ1cc5bp`0;/tY<}>]Į=yU<L\p#"""""""(t?[Eno/B[3ښ9X]6XqNN ɻf74ѪDF?N<굛8 EX`Z4z)l`I 䒛qӯy7P]k0h`N0 N*b0h@e.""""""""v_kZyގfU6WOv]q'.oG&1 BuE#x| %#`{.𾜂䏸{m L=`0z·wS xΫݗ:TDz<=hu4 )JZ s9IOM&=-ӑE8~67$dЀ2B۫݃ #-T,ESS |5Hƌn -Nb? eJsTTVQRVNKK+bz?_aC.`(.g}>NrB5^09Y$'%<*.D||,ٙ\= TV]L--fbc$"{q#@iY9OsL \)--lE%/0d]VN*DrRUEDDDDDDD nK=vl>֬ {ٲ7A0䯿> \JJrog~^9&l(?D]k|||-{N?Q,7+㍡u7lf ݝ~o;hii`6s4a ͚jfnFCcrh!:$/v+oSuK0vhh!i)ImA֬M;hmk0p:ctIPu6۶sƎ; e-U_Ӥx}>^}c Nitma3r t6ۧ8Wu?5ux> 6XEDGuY~+M]ez*ϝy̜l4ko#22[ߥvO`B;ͦ`вͼ·.%`1O=BnNnz_NuM^0mde iϦYqMͭcYH+%*Q{Kk[;~fODW_`v9@[3.I0=ʹsIޮ 'NyXo3'"""""""r-awX'$&`2z. 
~u7v`; tz&MXv+~'`1 1c:޷G>cǎ[iii_f&O=$<=GjE ՜kmu=>6v66nIRb MzjUjXq;ی6[E:z#2Qؾk?{ӥgY\N`Ѐ\:lں=|=[شuS'P?Ʉd5۴~̘2XJJϲv6nFlL4ǎ6c\NcFSx6gars1Ne`DDDDDDDDzuG'4 /|q֭@KK ;v ݇[m4c{_?hnn&22J{o G? #F |*~Ȉ9UT?˶m;OțJʛi+i&OB|BoZj2ocF DRvɸ8Պnm}LjJ< ]SIKMWf1[e8fچj֎@ PS˼3{$֮n`՚M;xY'b2 V0.$1Q<&4LPi\} [xW oqbaIOTT$?(-+g讋 .Ljj ]7w:ݎb1c۰Z-̦kz?- ÇdvHZj1Qd'Oيs8v폋tPYYE#voپFnSYl޺3f04`$cG ''r>1ֳwzl~>_'̿Suۯ{$GCsZJi)78U\¨q8.O?p3bfRRv1#t(%""""""""]z͙=+W}z|U8,mvbcyůgj4K~륦.TW3lX ~u~ωgY[}^F3&BO$&1ah[Z(Xj;ISs G tf<9]z줼3=P]{Mhx8U\JD3&n0̔uQj`2T~::)1dȠ=^fڮVg5W]VIcy{l#5%0himՂa2hkkX̙1iSt^{K|>Nn G YN.d2a , ^O?&C}ӉJl۹ܬL>}CfgsJᄐJtt$Z[ۺ=cnAKiiic꤂sT{_g=F3I܂q@Σ./;iljawifFNjJ}z}MG]466p8f˺9S~L',넄}ɭ8.$w4Q[\t`O ɉfe1~)" wnjD"ݤ$'xl6F8 ϩR~(ΰ!IL%,̅㒑5 aY9[q缡 յ@FZZ<υZ*+s)ł=k 02!.6ǣ/ oJg?4ٯtwnkA[[;m '' ՘L& Ǝ!*{AjxZZ*NgWi>/%%]#eOmmm^¥[%j6Ik+֎nlpɈYG j@NG_~M̩hek*.~re&ٹ%e2fiɌ6 nJof.+ 7V&ejE[q\ m>JJAD~Tl扅HJDa16lgFŴ^2A:kncG` 9'ؼmOW۳24~ vnk8rm:vޮ %u |n ' kh"*2p‰5'K]w&);3#!{^VZʹiSijj"""cX,$Ӓ^Šl6c˧~0p *-<ņ~{1<>s9 7mm]o]>p9ɼr˷d W}+1 6!p]jv]\ /7~iS%lپ v}f&9:0ǥw?XEKK+ΐAX6;vNDLcı_c~يs<&fv 9vk9pTQ 3$> F3sLCd9t8ǏaҸ1]e\ dBIZZo\"쎎blA>{c隄2==ѣGX2c4, 李TFbB,v^, ޏbQRZF[[a.W/oܨx"ia25- 5QzĄ/,l6+ MOKfν cigg'aT n.nl6+Q5utx٬6|x}>\NlH"HK!;+߿&YF_[7Q"aaPZ d2E[q\*+̸#5}b=PO<l6+n$2|@~W(Im\XfűE"=qr N=t1Ëd"$$.6du{$bcq:tx;6 d׶9  r1gܙ{<ttt\}.XDGEb28QXĤ =b1'J)iwEbXODD.T~f͚ItTTᄋ^g?% XVƏ/ <<~20~d .9M;xմ^~]b rs2c뎽?SL9|l޶_{n˚L] %`46ľ Ȍw_nHt +nq1L`t}.ɺ {Ԅ>YT+oϮ=?\+(6䪁d0ʈ\j%7;Ɗhd`0ȾG_->b=F}Z-ɩPK}I~.϶% 7'0gV_׆ʝ͹+FvC\\,ѩzen7YY|HԱcxǰXDEE7?%%9/5vǃgs?*#_[av"#_07`ƴ $qB 6mj̜D%f3I:r52r`23RvyKo0cxRhmkg?t䤄eo ""vFJr")Xf`C|*]`avN~v f=/,Wfά$pB k7lѣX0 "Z-\dR{#,/smhJ.v1VԤ1"6ngيu_Ჟ7γ_{?K9|`0jƣG^n׹ O%+غs/7FqG̗ح-O=>wd㖝|nK׶-""|eѣ|zyx(;w0 ;Cwڹ5'>pq^zmsW5}"GhBJ[^  W^|e g?1O>Xh@ g׿%**Sf͜q[}cN5\`ke1'sV_h~J`Jj5`0Hٙ ΔAzZ2iR!US[Ŵ/C*ϝLmL&bPGhllfBᥰs.$0LTq)5uttwJzZrm|*.j>N;)ɉdK5nim豓76ØQCn8WJS[H0E1~@59.6nt2zФS\r69&'jfcc1lVUŹ󔖞LLLy9Y 3 rT g+ 8n4aa.Np,C'-qr?- IDAT 5;DD u Q w/Ą^㦦N.Αpz_SE%7a6HNJN? OJfNk~N/aaN2S>liY9 <0s+>q.wE>"ZZZ4p zRS? |3硽Ӈ?%k6qYKitX&63a5ksLJdɇ9zBZZϑsb2ewuk]]d;;W_XDDDDDDDDDn]vY 7f 6$ya0yDV+Ny_}B>Day EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay EDDDDDDDDDS-""""""""""}nvH[DDDDDDDDDD<"""""""""")>Oay;[ԩb>h%Ǐ/Gϐ!1}gGذq uu$&$YL<z@n w""""""""""7;K߿dvhmm%>Gwu}Oyݽ.sg_ޅ0Xw3>ʵjljf݆m=pԔ$:2uLnں֬¡#~r2yi$]v`~ 9[9m8?v7[a`ֶvˇ+1z=^]{aZbI> >zCßO2QS^(줦7S&e\Hee TTVaZ3r3O$2"sh,)+w/-0>=YL&#1!h܃::INNG{vl6||g_og`rP'5yKCikiq8: /O=Ay\7-+ߣYZ,8t?|O^6蠭9YL(D0StUk6湦Ppl۱HH]F]}gLdSn:>y\@nr:ogSXf#[e1$N w‚yz]/Ee?Ovx9 nO3[ øb )r%~>Z0q(NKss+;|u _ʓ yRdfô1`Um.t (FJfFuMtȿߟc-R}sdqq&Mg箽ICC#_"  eaMD)/]dKuWݿy:g_źy7[xKc9(urwr9%g9Y\7$ٙ1^~S_rmܲ5<a x[_b_-:;1@^ynbx)W\oeִ7k2f#:lں!_.kq=}xGFu]{1qh;L fr&fϘkxؾsi Jgg''w2/TwaM.YSXA\.'.YIyei=K)<)8s:֮ȶ;:e a,_1>6|2kknKR/EOr9og? 
׬~Dnn6`Y3 #X=@ll m\jo~V^˓?Bvv|:ΎwwVVw^ɪf N/l?LVfɉ;3Mv?_~] {VeмYpwrx}]$z<6&#{A.Tבc'Q,8NLWB cGQSWz}L?#پk?1|?Y;;<|uS[ۀig̨a̜6A?1͚LJJRW_}bZ6tfپs%e W死wtv;DUU5 ,\p?n7yQ u ^c'N'1!L#7'F.Hp=}BuvO ꣗?7k 7Y̟7*oAMM qʮ):]dSLc, &Ogm̞v{inn%22i0ɠz7sTn1u^`l׺}zЄgYܜ,Ů]{Yc?DEF2fkjf_bȘhʺ 1b(yyCMmm=55DLL4ys_܇X?Kw%609ߟ߾;x4yyzB&?}j4qn̻YJ ߣgB| ~vzf!Eˈpy974|^~$ΞJg[vڛKmnٱ#ģ(8u[8v!#sgtx#ƦfnJs=$-Kn9[n#35bwQWH0lɢRM-;NLTywJRW9p8^ѣ?jGy6ҲrV~:n߹M[w3qbÏb6LL7khpdֻQ^YAHOKf҄żiimcˎja}շ韛yL?CG yeDEs>ZCyEfL"Ee~K-V[Y@ @Mm={!6&AyR}Ry<蠤[wR[[ٗ]Y<onArr/~ïſal6q0p:x}]\N'eoA~3}Zݟqׇqn;?v]׉<4@Lؕ˝܂1"l6piniQ̞1p eg+8|'}~i;KVLTd'NcY=c"1QAdbӁupOGlVRSw3qh, wW`5b_;3=93'c24 # qL2w@ HqNtP\zPX 闎aӨſ`M-ج^' B#{>f2~H&4ze7s2#Hxa5uFSW 5̻oz(< 51*Aq:1((bݦT晧j^v}$ kU_=.NxvO(njjkO?NFz׭rohĩ2*}sً-2L8vwf0d!L8b0eXV s2w('δI]?[rw>  wCFz Ǐe+S^Y7ꋗ ʡ+P;k 9{23RP0 0ȱB( 2ur'0 葃5=j]lؼGrR|ieˋyR}R\r/lW^'?5?^Nj;xKOn̜;Ktq)#F j[t׵Akyl:.ea2n\>] &ƏƏv"7&aaX]%&L^r2رk?lf҄1È1n (n%Xr#b2Lp:. 2 "|8~il޺u?zaa._+v+o-tq:L&.aZttt]$::FjJ.$0 {JeaXfK<5?m6+a86L&S(p %"">X9ӈt`0ȹj^z]G m]K/HO -gYqmDCajt8 N?6o:(NFzUKHt=}b0{dfϜԔd~L2뇅9$ l6\ήQ,8J|l Yp8H=*&:~q;w_TNovT{l&"">IJVRRZSE{Hd"5%^&qw8dfSQQICcn~HnN/[֬M(..a߾ClظW>vKC ZVނ`Ё,tph!,~aʴIcIe8/\hjl\cc3f麾v;ٙ8tֶ+ުKсY<<ҫ`ZHOKaꤱ_gwDDWoN8apD[t)8jW铗&;+SťW O.\paˎ=\p72}^m<pq!aa.|" yR}R˞&7'].8jjhm] .4{|v؄dj偹sx`[8QxW_{dĈa<4ڞn2ҒZfn׭i2 `İA{ohdيuڻϿF"#\26+1ѽmll~Bq[jeB(F LÅZ~o9U| ep,յu]2e}}#ͭDGEޖ`c|b-qQ {_O/5uDw+!b?vwfeN[[wDgJ;KVvS\;̅l[R]Sw9fMqitPvw_AMm=>rg\ot9-k``2LɭŃsgᣅZ` 9nkr9C&Ȳ9rda:OOʝ`Fnn6OHLL99JTԕKM:o%f2JKsHg\M-aW.p m۱[mZg0 >rL+lPzJkXUW!0rz}4}{%&&0 UUmn%%7fh[W~^\QZt* <*)ժ Z~ReZ4k"Gu3lsxVݨ^=:+:*RQӳV^⒃?09'+}U/ΗjoVt(?6i j]۫bch :vS^Q7ߞ*b׏9iht(RDDZgehj))SҾڹ{&4MSkmRtTZd* YZ%9MGNJ!thO> ЯzZf23R߷,Z~k7)t9_VEi~Fm 'ݮ~z3λ1#!bBMHEշoO嵫m_v5g^?R|\m~hg3Sӿ}\Z-:|^}}*ثTj7mjO<ظXO*..F2[QWڜzvKI'65w& &'Kmsկw7\NmݶK{5+|)0mn͙DJ(59QZbڷk&ZpNM6C{tfӚdlXjf𑣺1RvZ|^ѥvɝ{c]}fuǮfj׷Z%Zr3ӾsiZjf]/:)1!^ɪ IDATCEKW*)1AkU6R ͞DvKZ,ںck:ds2!.Vzw׋]Ą8#;'].w-ДjuN<V٨Hu4wbn׎]{TYU ~vmrԦu>s(1!^UZ~._ZI$i7Tz]͝rseٴgOvܭݻ{̱z{9msu1vWu}sr\ھc5l G7j*^#F פ&VdZkmR⋇zZv'WM: <ɺe͘5_>) DsTsrJl-SYyb5ҡգs6>j¸Q5wV Crs25aܕ=q }}.y}>-^JKQ*%9Q]2,Fm{m5q˹LvVaϯuf[Zq1ҡӻ+=K+jwW%^n~=* %eodaD$M|} _zK_FR@;UI˵h[:FzDoNZTwNZ*]. HZ˞וhhdmBVoo.)ӝk9U788EM 퐬Wns6%z>!Y^ͬEtn@#vNVY,Ii0] //p=إ5y;bbCG yG yG yG yG yg v\BS W I亢]n9!!!!!!!!ѣTU]-C""'%l]YY&O՗3#]vE9bÂ\!a>;}'\ GSP/\aNq v&<|\=ڨ19^o.W 8{hU '""{ ֵsUZljZf_mX4KUQYYB??<̓6MSw=?nԈ+.mtU"ԅCpSL멱y=t'Ir96!)x|2 `N"LӔbi@_6Ev[?fʜDsbaN"|<>\N,o (ev)rMd^ˀ?n~^eggx~qY^i;4MY^?+;;S=37vk4i{`nzuLЂՏ/p.jzjٽ{ڱsbwigtߩ~w''ިBV+\rVnŢ~}{'Gq1ھcamݶC5nۡ{XwR\l|ԯoFꃩKգGQWfkC~Q= ڹs!]8lfΚÇ*77GSݣzz4tÄqr: 碠ѧw=PiMJK(++CO>ˋci萁ZzM3 /ZonnAo5k~_@@vX\\~Q]k+)*ɐaW]s~_9LGK[oO֖-r95?rj;wҁ%I豪r_8$ݮ )3CJ-\\ .ױeR]5h`?EGE5.4nM[d2^n;} Y, A2t?yѽkˆ_Ν:J_+V鞻~,è]t9Vwmttƍ(j5ڼehuhNY_ ;~NzW[oPeU/X~/&7 ߑ3yj*??OR6lڪ^=:onѺvr3OyFoP#GjKu.x3 T6mѫO-ZŅ݉4~`fΚ];oTۮd-_J\}e+.X)uǎ;gAx_Kskwޫ>D={tգ߯pׯzWw_>|&oA ڼPUT~~)ߗSm֮߬+G\niؾ^|e~tS^?wmݾKoVb+?& Ph1Gj'7MSwս?{DNS=PkbccԳG7͞3_7mV &K/[!mX>Oھ}gݹ|)mT?IeeeHqr:z귿0)á{ZNCsncӧ}~ m޼E@@QQoDIɭd3 Cm>n[Y.զu+dTбԣ2M;5ԥs͝DV|BqZk܅Z~nڷk8IsR͜m"ϧ]4lw-;molhf6~-өGOY2MSoS3,$C@t\^xVwDj]wl7jhۓe{jϞ";ZqӸkҖ-[e;M7z(++צ[$IhXJOO MԱcez2UUկOS^[*+f__|Sxe7/uxiհc^^A+G^" wt8tՈKt Tm|UݒJ띩ST 著mnޙ'%''t5xSX|y Y^ۿ@ܳjjj~&|>uߠk=}+ ׏;ѣG꥗_A~ai@~2 C! 
2P,{wڻw^X$sۧ"Ie5ͯkz}r\n(痕iނod.6PI Zj.XxxƉsl6F]qR}_Rg^RLT.dN/Ӳtad2m߱G=Qym[K>ӕoۻ?vo~#GgR뜬ږ&{G~T?o{WVVj߾oWNW^Vl=ϧ={-S>fϙfk(!>j}43ymk[b/_^Tǎ;NC?.xOSnuI\+,}2}^|U]~%9Ή=wD.?'xL';'NtcJf͞+IJIIQ [M}{uז{MO&NP䷽̿0 2Z9[\4tf\}%TA:X,2N ;Ill>T|+=-Ei)j.Wrl;-w[ڵGOYUWM][=JJl%ɐjTtti@@3vNzuP),)/' M-X6m$ 4Y]ͪԻgW-XT%%ծm 煇jUB auxx<Ȥqv ] tam.ܦ5b0 WѢ=/{^I҄&ái&զԔd4z4tJk׷~؃$P/oN酗^U=En޲Uo]9rmذI<7٧hydȐG X[kFֵ jWQlV@[),g{A驺q8?xH?~4]7pm s-L [EGKWQxD.:@1ں}._8&K>Uk7SJINTB|u騯-K5޲m*P~huk+vn;jeMn$R)v( F wIޝ{C)Iڱ}>Ə. $Iz~嗳uQ2MϿҺԓPzzt葚:u 6mf6o"Io3znÐz;wkEZfbT]UdYuMc)If*MlJKkF0 >L҇fh¥ W# ׍VVfy*񪪪KxKYYY*.9 <-h~Ѣ\.M;^3MjlV2TNN"N$+';K1xXeee4uUubc6<^VVu&LJ]:iԨV_rP-YL+WEQ 54RI Q29MlT("̡0Gss$٬|- faN"LI~@aj1]P7'-! s-iJ@@VELISY,2h)jS-?`*0ek<ڰe (I}"6v Lv7WI\f@T0$93MSn_!rj~٬ m@@_vh|9rXeaZ/ /&  wك]i9z\{Jt^<ŒoU!!T?, IDAT!!!l.ګYk&dghj!v)a]fv ʪ=x`wDn1,I_{vgPYY\r\Yڬٴe~_jvF]>{*::QTUWR5UTVs=HOS하JIIg4&Y3} rtAQjjRShջPa:|䈊KXoOQCޣג߾~=w~&ϯk+&&Z>3ѵc5Vyy P2MSI о\'wB[ u.h/I҂ExOy~}))U= pּ?cQ>=5j{Il ILWU^^鑑Zvf͙بjO ?~~9N%%&x uq9}ܽW$b0ƏVsngZr5tuawۭ-See z}x:X+Ţ­u?URR+tWǂ|)%5I1qع[ڵ{6lجmwHTUuPi6Jxy}zgCuUlL{~Mz{Yruon~}{jOs57J3J?I+7lܤ&իשVvVF_5B htI zx_3Rt?Њk9w9vSWߜ,߯;kj' x>.hԹsǺ>l^X uYC;wxN˖ը钴6nު.>0 ٬V]OӦ/@ITz:uuNE~]1|OߊKhͺMѭW_6lQu޻OvQXK(-5Ya4?l*ݦΝ:sxF~_^W~CVUNCCaaX,ڱcWո}NU?FEFFK=S4h`.]p]0<}kjtu(##.ݶ}}GWvv,*4նMnq|,)9(ͦY7lj[EE%JNjNy)_vM;w={|<Ǫ[s|B]T__|9Wi)IJNj@ WhG1WԵVhngӡCaW_V'BlV~r-uev ^}_:vL#aH$˶|iz5vU1n0&\?N׬[O^-:ccuqYoEtIAxt$p4x-;nf]5R6rɔWhH׏n'áj^[ZnڵɑlngUqqu\$+5-E኎ n>-*֖­ZfC՘vBNSq:vV+'SUUu7*66Fݺv:ؘhuE7Nƍխ[:;SiZx6W@@[w3dx<$UUWk糔*A=4ik߯j d׆a.My$lx0@ ଊS~5 srfnjߎƣ߯Gjt)ЭL3Yjbi Ʀeeeqt*)1m6dZUQanӄW7'NQdDLTEEr428ްP(!>QcJdDڶyDݺtTymngUTTvu[OnP:zJek(dguuRf#b@ {mZ|٧pM11QZa[7x̪jX WugNCVZj6KS>TmruըK&MfF23uY ir:vu`2tT~_iGu[SRb+]4t`6PF)I w=ڶ}w _~oMnIOabP]r@8XE%j?3~uhp8pP/-jUU3wA0i*aܩ;5kןeZfbccT+"9;ٌJWLtT4MS>JYimpM7N@ ;2͚PHOy}{oΪ}5=%%RA|u,W|lRRS;w\vц mIRddd0?maq5kjJtQoИKҾ?v"zKv͘9_ݻ4M-_N;vw_Ӟ~ Y6=vLFi) s9|_ngUZjZno_>*t\o WS j*A={tS۶q_kF}^zT׮۵km)ܦaC/+8{Bmv͝X?%ժD]3ruZР/ :4֦4wɔRt^ZҺs4zp[D5!3`uL]4t@zYfԹSGuQWr;tjpt(<,LY7lJW\~35yʇZjn23h>"#4jE:;+]*""#d1 KjYշWWUF@@6Ma  k}5v]UU:tT11r8r8KWZ tODD>z[%hқ/<$4Omlӡ)qkl6wݺ4ەנSO8n۔j1 C'@ b[,EGG)=NYsݸ˯QQZJRRBr:|KJX[ j-:7 YsÔN~:C>\aa.\.lVmvYmzxU]?P.ٽwO $ngbQnnvu*ܺ]۶nW:zJek(dg*;;Kې'8zvﬞ; F hڵQ^6,؅ Y]g!!<\k*=? >@d Z? œD ÐbnNZ C$ZӔ|h dXd0'LS)"$ZT `24]]QfFʪ<7ODSaf0MlAo-i!!.)>RI Q2v ~kI>$B'Ir9@T0$93MSn_!rj~٬m6 /7"s7|y|~6YZ//p=إ5y;bb7G yG yG yG yG yg v8\T #]$X yG yG yG yG yG yG yG yG yG yG y<[n-! 
%"K]:ju3שetNΌ֭hq) &DAYf%@BG0mxss a7 #]{ё.ႅBa2Y[ BM2x&y`3id4L"|6C G zG zG zG zH& +ϧvO`P.cLf,ffl6lv6[*pX jllR>d~Q;}VJ2R]]jXá8\NRR!sd6RP\=\>P`PWsT]S#ITꟑX2w9LcNV}C/[w?V\\Fg?[˿z'NG]w.Ԝ'?=u7m#ť)Վ{~fU)Ը‘|Y\UU :qEب;JTUuR@@Iɉ*^Re0 ê8{ጬ625Oua55nSzz?1L1.W/-WBB&M%{UvB.S,9jUTT(99Q#F_>{maINUk붝nӐ!4bg^o^~Mm۱[ k\(555i8;oI.Wg 5ybv,.5gj҄n{C ~ۭڶc/Y*(.*$el0?W U[Wnw̟Xuѵ+*U(fӀLۢ{ȡPHmO>Z|>LFŸb4ixRMO>X+WfL&9NM<+33C7o;ٳ-vEԎ%jiiU8fMkjV:5u]z߷Jm߹GӦNЬ+b( _ߥm4mʄ=7+ШިAe44p~ ZzdgwZ- NL-E-ڽ{***Uy䘚UqH JK@߰hii{Ay`lV]QK_#v߿T~dY;`TŗrZnݹ"y5*Z\3} 3gsoV^P-ʜ.5ޣXoj\hFIAO"mݶCƏյS'^-Mq$' |-^>Zm-[/zIrwAݧ$3REd4WMU'{nuM**k2h@qwK\t< ц[tt=Z0wRf;ڽT^Wfg)1##l՚uڳk|>GoRuuRS/ՅhjUZ=ۢ؎@jWh4`0( ȑUG^3W)'۵w:ӧjꔉ+?r:jOrt9Ν%jmk3$3.װaCr9e2 ;_z֌Π[fN寬P}CBd[Wl*ͪcGw\KKRRf6SKKL&X2J:&롇8[g^.ͪP(&mݱG5uGM:\~T?)I>Dp=\y-=Ȁ,Vtneeft{QN֨L+վJUSS8|#>>N&hB͝s&/TZjp;:T&I2ڳwd2tq?ӧtt*8={+66cEs NܠS/jUϧW:x3h2ex颯nZXsgO䉅Ǻݲ:yZCeϧźd\VES.#rU]S҃eڱs7oYWhʤV@ U(99I.S1.r}}>d@4 U] x%x║O׏`0КŚ=r]5cg:/-5YNC%?䉅]zo߹G&I{\U>_vbאAٚ7{z صWc +&= gR??3K:WP?^ںnc[[tTWx-M?G?щ7x<*Pknr`0tL :X[TVv@ 7W#y\uf99O׶n۩\fS~~N~f46vvۺuٜR ڰ=bN.;غY_Sէ%I 81LGUQyLlRjJBGX- 2Vvp_Zm۱K,hfh2uY-I@P_@mm^1Z0/ŬÇ˵kn>]Fh ZzT8@RƔtϽt 4jpmذIӛ Z%I?#]7lRBG'MP0Ԇ5ڏLXz뮝/O|^|U.)٬@ pQرkV|>-֒%!!^pWe6֝Z絪mЍ͕`ճb7ofSZ}owjONNPkk~JO#ɤ'|VWLk_&nnt[ڪݻRGYGtQTy<׷$iMUG޽t7AO=XO<'d0dԷOo{Ϗr9 u6l~gwO(UMMb5nwGR#行FfLôdJ*Wޕ]uu]q8ur*5%YcFRffjͼO;=\rrfL\.6|>}>Am!$"$&&C %L&bb\\.zvD=n@#D=n@#D=C8ѓ>/zJ;s$Oމp%dX4{85j.Pˡ+:m޶/JjQ:T~\%"]M7B;ҡ.PzTM=LTm40ƌ?ݢ3g#]kT^^Z^>`xFȹ$s/Ť/Mv9$lg0r9m<lVR<<N"1Ve)t9ccdWi) .P'VVYE@)IMV t9NKds_|XjzG zG zG zG zG zG zG zG zG zwoIENDB`python-elasticsearch-9.1.1/docs/reference/images/python-example.png000066400000000000000000003260201506101734100254670ustar00rootroot00000000000000PNG  IHDRzA^[UgAMA a cHRMz&u0`:pQ<PeXIfMM*i&zA\"3iTXtXML:com.adobe.xmp 833 1402 1 1 J@IDATx\A{[uڪuUj[mvٽZֽ (@ nx% ${?{߻$s=Ԩi{@ @ @N+Pi{C @ @@' @ @ '@ @ @ @^ @ @\^'?> @ @ zq@ @ @prz @ @9@ @ @uC @ @@ @ @ '@ @ @ @^ @ @\^'?> @ @ zq@ @ @prz @ @9@ @ @uC @ @@ @ @ '@ @ @ @^ @ @\^'?> @ @ zq@ @ @prz @ @9@ @ @uC @ @@ @ @ '@ @ @ @^ @ @\^'?> @ @ zq@ @ @prz @ @9@ @ @uC @ @@ @ @ '@ @ @ @^ @ @\^'?> @ @ zq@ @ @prz @ @9@ @ @uC @ @@ @ @ '@ @ @ @^ @ @\^'?> @ @ zq@ @ @prz @ @9@ @ @8y}@  0vHjݺ%%&%ĉݻ` @^^0ga{~NF2;ݚ 9k)25a W%J5=zݻgõcU@N|ͽ\<**RSr*Xz`^n_}gӲ32?+.Hƍ0>O~ʻvM7nܰφV@ [}{e^4vܝm͚ȑun@o~3#~j e+͆ @rf)}mΚ@(bB'8ѕ^{%qx/qcGɉ@Wߩ [I=7wVx=YRQf`A/ZK,'4dpW_Y" @PUrY3}c6J4 E+Z ť۷r" +v:y*RN;yFw4fݰa}[Qj 3@ P ƙJ򘕒l ^g;b %˗rpؼy3w}@8v<zn#=衇 WsB @ [:v 8@e7^F{w. 
@ `]ʕ.8]7?mZ9&ӟqCFⲾ{ ?T ^"+hRbBY̒%KPvmj@ J^)&,޽:lvY |){QA7ru-ORrJ29EwÇPz]EJ|Kr8d$m۶SV-nr'"NQد7RJrz Hoޢ 狗gEJHH iqVq s^:\Uw{2-$5_OAUЭ[9˗<\ ذ!Thaq-@׫C_eӧk`\:ulOQJ+Wq 뉏烹%V2kv҉j К5tQڿK+Sf"t1-[5(HQbb'ǎutYp!ܹ#5nܐ<8_nbci=^vx)qPZu?s_ ʬvyϋڵkuoڲDkԤ!իS+*gef,*Rn]bbVqnkg+e-zSѢEgfOs\r,hv2ᇻ]>]`ݸy:o#|E|s}JYΚG[ٖ5L{-5SR.@:hHב9'?7/qcK3Y$ ޽'k0 @ zDsĈ.|/ۣ{WzqjOe}gUBG|i3>"*U|9wD|׎I34 (3]|Y3:wjO7ޤB_z諻-⚝@o"EhؿA\R.eUVIͷߥ'I&Æ RJҥKe&Po<6l@gϡ;wcvԢE3<+ tϭ@o@0aqKk͚̔ Co=6|4>KJJJRX rɯv>Tv-+΋n:;/:yW9Hn=hʸI|x~^F57m5v6Æ ?w<5j@5N0%6m}x7n޽l ؾ$ȯ'{|>+VL= ?9+U;/B0S Z'W_~G3nĖ'u?s[iϵ׼s_͚56ɯOD?>Ìk?ޣA @.k;_22|9S칳tKd)`grKB/F>=Su8C-,,BfR5p  'CA_~^]i{Q%2wu%΄DeD1ǚY,fϐK|uQ S /f1gͶ,T2,giZABKdwZ1,H5ݕ1 ُMO$*9⼏Jv0gӧPiI/ϷŹiϊ\G.I"SD  eΙK.7rdi ,P4ٟGW&<&%^6ԫܑ*Ux~1p}yNju\dfhሳij̙U?d>U?n<4}RoY"ri~٣fIA6.Q^=wާy6́\.u2y4:$~P7o6g{^gs■>_a|eyNxFEGK'S([4>ޙZ(>WO\`ד}[=Ì?3)S"aGi\jğ<̙ dўfEʼn4 @r*PK\=]k3wq "4|֮#7,[Ѷ8AQMyy&,&y:#/0x}}trݱcFQRxI ٺܸv^ݼyKʾ۳- //Y9q %_Vu raJyyF㈑Q?6pAI&A^M`w`dˍG>H xvԬO?yނԌ00dQFDeO.k|kZ>7˙ /f}(5?ҥ#ddsmOyy͖Qw9~l~sV3cge8ay)A^=/V!~=߸q^AA"ٱ}20ިqӬ&ʕt; @ 0pjW)z(vo?śs&܌)qٹq U;?n^=c\]!̙hƥ 0_̙sTE[Ry^/g!^V֘#zgۣq~)~Ү\ g,r>@Tgo&r4ØQOȬ*Ͽ&,\_E>2s2,?R=CW!2A흙m4Aڴn%.w72wڵ/J "#g~8cq}RvfUi5QZ2e|WlڼUw87/_?*9qSwvZ\Wشiӊ\P{SS{hoׯwיQE;ʋNzE# @-"yI$qf(ͩ/9N.j*m}u|9:1Y˵z|wx׵/t6x[DNfB~3÷)AV9gyW97L&6\3,lR$w8Ѕ! f~M[%EYz=lv=͵qm+W?7Lx-_].^.׶sΩ5Tfgu*6z7TcȒ@/7{*sGG|ҳl5?qh*#,8ix=ƅ1QSM>ӌUh @e K,]\:oֶ`h[AdP>*W7x}o߶K*)Asێ_Dq/U4ͲQGqΞ= GA9]okIG[xTCʔ(* ペy{yt@:R3Y/S3Z9jkNds_)6l qlSӫ9?1(nFNk珞G^~>e7M8['=GgZwWcַ2iWp*.wÍ? u4@ B@պrz5ݩٿ1&Л]2f.c]Ԯ%WYNm?nʝ7oF+.;k6HoKўKՑdgx[soeg|-_-ekZ0A<5$͍S,zsN^І׵Y1Xp1q6/7ql4\ sV^ٰb:r!''%%TI\ `<)%%EKdt=^Ox --le^;.(e"Ы{0 @NMN̡2e3}t̉Y ]3{z;w^y ~]o3m ݺƛ.nbqx|y#gnrMՂִWZx9svstt5KcvƒkUiw+J e5Y֜u`"dw䧟}!;GzPqRK@ln_=\\d>9u˟eHxQ79n6,JcrersY,@ ejnqЩwݵszω@YhhRS x&A^ԼyS QkzlhyC0uhWiZ;^,VrLV{yCF>l6G'O44G+PlEY-zxxȺ{cY*ٟ xt|qj of*X.gJ+ރlyng/_ФI#jذ|/}lioqc@ @N Ź\]-Q95,; Gjݺ%1U*!t|G{z:k*PA/aaq㚉P үM`Ms0ō|eV%<`0ƤlD@9ŋr`)ײseu,u9Ҹ8qT6fݻȽ Aknp0t @YPYQϝ; W9e6?hr3Ynre*YٻLsEiqeڃ>>@cE7iH˗z/*_.:wEZ,ZlLTsЋoVƥKȃf O0YE^um ЕְA}YWynX_\` )'}Jeyk`g]?+xݷnR7ƐQ㴣leK-00xdMeK\N);j^G|'ϭ[M >;z,?-[\1_#8{:xDPE @, ^δMwiӊ3ioNhp/~^֭:3^նmAmr,{\@]]߸qطarǟxLy'GB\6#!!ΎfqNxfoڴ U%GJJsONNGʲ 4LϞf nX^RWGi|LlEmծ8`^{I+ O%#a.;Q_Og٭kgMՃJ@zBlZB*_pa{Ŋ+ Ν;/o $|ݸa{39 @/_ku l'9>*/jYj|1#tgVڋR/Wo&yv"2hVn5lJ.|K/>+.EzuM6_,"2/~OR ۷kM;dL:,Fةc{Y#Zo=]_JбC;{n)ͯ%K˾sFS_ҥK  ƙ{GEoFǀU33P)'nH9x$zo*۷:vhN,AVYN-c<1cF7~f/nv1[칌ZDǎ>!MNdۧ 5_e7`?mz2:t6S?i DE@ @e"ar4oǟGǏ bŊ/%ڷ7q&}͏&G\2Sd pբF!R\7/OmFe&1GD^V3R}>}t1r)"SـۢEz4V"oΧ *Uj׮,.yؿE&Ӧ.j= q _Tfiؕ_|C;v2Y=G\Dy1qJ;u2k===%Ҧ٣%JfY+TY J ׺ޭ rFDDg(?@ ? azFK>7+Z?SZ ֿk^7׷xOLAW /^ԯg;n쓲dgoYE~94ى[n ;dy׮]O]v?|e"[,]qgy0:ŏu"pbs:vjOCD@\i@4y\d=3r"@ϗ޵O,O>!o?)efLVbf#.WR}tu֕ٷZ4nG3>26n5W"h߬Y9_ZUZb&֯'co7l9sεz:~j̴ɰV\EO>|ҹc8>`@ 'W^B/l\f.ʡC?^NS]_n8efj]n hь{-$ux a4i4zeėsӧ،iD=UTӶu7z_~yΫ溻}QT;/=_h-;g$9Z[r -V(ۤx>{޽zаa-nfL? ~yWMjD(o1a‹4k ,tQg(ɡm?%ˁVsm_ѵkWE0k־[xs@{&gS:)ֶ8 &3Gnj2#'ի,|_|KJ,%v~ag'mvX<'}9z[dbΞ٧fB[\DG|=9_U3Ur_~s;j xa[׭<߽{/=9i91&pfo~]^an(?gB!@D }Q?7qs6%'P㛀X۸#g湉llҥK+}Ao||FY eV~$'q S8IgD.eh*e-ЄD7޳i?*|z!qd9q]{9ЫԱq [URqū˃ĉk*&4Vgn=q(ԋf=8XQW9sYAvkשS)Ba7h?\ sp#$ʪȟOHr߀a?(pծJfSNl6x:@ `CV[k3׫]zm=fCr8{Hw~ʵ:Ѳ'%݃/ȿ_֐5ujx{g^im]u3/// ;ŝr9n޼5}$濂_3yݘ;fx=[w~oY4}'  @J65g@jܞ@onc{(SƅڋqKI X=fɧ|itЃ&g.hpQJO>&MQreI@ d Лi!@&𼨳u[wmP;PT)qC3׸/m9 `I`cCiHӛYZܴ&sf17!|sܸg6@ c2Yl2u~g@oVB@ %o yqqYvVw'n`!z&*,=*!@ @-蟥Hn;b#k#H@N*7mJw+V_dΌ'##3qmQs: @ (Geզj2MNI>uaz$so= ?@=CW [ Lxv"իS֭M^'dLL EǜсmYODFQk/+m[wٺ9Ӿ}b\[h @476"-ojCpB @ @ @jBVω!@ @ @!uÂNA @ @^^0' @ @ @! :@ @ @zzœ @ @R^<, @ @  sB @ @pHzS @ @@z+ @ @ @!uÂNA @ @^^0' @ @ @! 
:@ @ @zzœ @ @R^<, @ @  sB @ @pH"+tʮ>^An#**RSl=wwQ\߹(*:f[QR%\EOuRʅtm9Cez]h(Y, &&8oԎ[pqsf-nt5%tk&73kA籧YE)Qta=Ȅ# @ @ #}o޲ MJ;wYw ӧ7?lz+[ON1&R}O~ *\Xh uoI ~|*Sg_4Qg8Лbp=JÎ/N09ރLH0 @Hw·QĎpUݨپ#*W^wXQܱ#Ӵ#7 ߚm[y\e +*8FG׭-{bT18woQ|)G֬׮Y\maV)&6h$g]8Ne=<8Gk#ߩI tQԄxw%i5z |9L[6hg0 @ @uC`,VM0JΞ;ouLz뭩԰A=ըQ G_^`0YV=!eyWA<=sʾܼ1i"5k! Pgֵx ,EB7"OP&Ml, ߼QdIAjx *_e:Oߣ'Й͘F7^5nF&=Cz}i{o{f JSݫ/ѭi؜T ~蟙o@!u@~7}sݓc8oחvA!mѪi'a @ :yݽN+OϊꪟyinKD~~ocl9 D!51#iVA &f[3!!1RSӬ5pfkٳISW>JÇBD85iN~~MD&uEh#r:t%OӍ8kܢqgY+;ϋ:dMMLGשRh-oEG'O..tp2N6Ȱ-U u)w*\+RTTI9y/EZG0nATK79}&A^gjحRejҷ9YA^<י%2ԕظ2hXP߯̚;/ ?NOG(%ٲi"W.ke(].;شM{XWdViҥa1C U77<,aҶ}|ZlwWġUeRO{Z׮_B'n$8/n% `9ma(sPHԓ圭ņrZr:d+3.Q10H^NܳusYJYtnǯ zm}>@ @GXQݛDj[niYGvff`tV7 Oʼz졕˩LKuD^@?qgqִ@4k?jAjRfx}\jJ\/kύv%J㛐i.7/.\lqk :+ f{9[HTҥ|4ݞ{d'(~i,ռe=2R8j~g~L @ 䕀Wv@+30$J3Xj\ٛqyM3nzrE>_NHmڛqP8DWiUe=ȖKӵ;^F/Һ(b<)@IDATըvZA1+WK%HӜCH@VfV B @\@?:BIdjwni#5ׯ_< X4h(1f 8˷(3mX'-nuh[k\Bow4Y%Dln[7,Kھp1 ̖(r-ylu;)&:[;| 7>(h6iqzԄ9X;FXWu+9 @ @ /K}'v(B5do˕/g׮3nڦ7?k$/e6w#ok/Srd u_No_zĵL\s7 #nJ(o`i44BKɴҚ-q EafYf.9bӖ(y~*?Mx@ @# HGAY`7"PPPe'K#Y@%Ei-)lFӹjqh u87^+ftC*mr|\us;ᢙ~9ΰ_\2lpI"ӖtZ5޿ɛf3 Y^h.YPڵB^wۇ @ X-@TwF ֪V jꎨg{tLHKjl7ϡU@/wNgUoF399r(L1nx#-Md[.dqI*ڕ@t)6|}pBOQחIbΞ[f*X \XQ10g`ѹ:n'`f}H @ @/΁,/!9+Cv(=`ܼ~ƣI"(6r8yAHsGRZdƏ'.)Utf6[cg*T(㞍чzl[ &.$2uCڵW2 oH-8Y?NlbUzr,OvLQ951ʸZu8#sv̬̱;Ԣy33E@O @ @2"8gireА!M`%&1B XL>UaOtyzt<Ѻ-TϨ툋5|:jǂڎM4k٬ *Ug7şNg iVG ,Fp8]G jLo8z{}R9cn RٳxڍGc?m<!@ @+^=41TJɊDOiUUrGSf'5oT?9quYƜjAԿj۶A""`f*euyYY pڷҒ2/7]ʾc˿/N~9EO_5kQ1O7AM^_UCSq 7mۉ:i?STRe mߑ8Ыfwnޤu|A}L?@4j'2DȫPٗ*Ԥ:]_XtjNeNGR-TUk*A#>6n r W6fFs8f9\Vf/..Tq?}&\ ^>"~h11B @rKܒ΃턄Ԡ?x▹^ke~4頁… x2zr=n~ݞ sęJ;v2'.^CSQc^CȖ],ihW|:$k/3 Ink4ZM"3dٲ2_m}-4%c=sw!Fۮ߽a@ @@V ,Z/.#M9q^xq%$df2̄ttZUZ:c<8#v?Mn.+5g&d@>rVo&g))ig_ƓLspq#vƙQO?k|w~xvzrƵp=ɿsd7nm1xԒб prOܜ{pY?[ۛieTx1荫(5!`5%˕#WWu\BT:)PLYr;@zҞ ~D`8D8ŝ:i0.?=)M.nnrFZSWp5V]ݣ˗Of]٬.qRΟĘh+2($S#TN%&t|0&?}=CdzE}<=Xɒrܭ7gEx@ȉ9Qg0S˯N{g{XRڅ>tTxxcuFWT[W7ɫjܑ۷ҢT='qzt{wyrZTpau\B~ :Q SQW)Y9[n][;Iʱ;˽fuI԰W N=Z8Ä&ͨ]n@o~3ŲH_ϮF~g=ɋrs>q~z){n.֝>늖,E^\gw"9xe hP;{ zse }rN<@oK%\\\mkd2hsx_uur6JњKFpP"׮\١ *]G P`yzQHr"Oҙ}{ ̾cG!@.@!v[ƍ%赳V_m;J"ȗ_JbPqڟ\nᛱ# TkwzW s_/=?v!<P25F`%!@9,pu~F.s7'NyLݷWP72߹CEvw 3iAy/Ud:F8œ߼dGE4(6ӄhweMPhv&^HH9vnZ:uZ%H~8vzYo9C ]߱ |17[sw M$B6"uꒋw>{v!@ @GGܮNSHh V-С#me%KvPժT<*RLY:u*vG6..޽z ⛂q_/_Id:s:E&˚iޢ)U *YBEڱ}mܴU/rԥs'#kժխW[ڝ8EahX_藹ذ!Thau:\z591v௅PZU(ΝS )OSٷ,HkKӑg۹k7kzvDEІ([иqCW.խ[ ]j-:|zpo]AeݩrtX澄FAwn0_:rz̑}y UZViBto٤Lg ڤ=7(t ۲Ғ,n?~N·m3q}]ZbEBX]?NG׭!.8^{OI2#kjKɍ4ڳo5wR_Ok37} iݎ* 3+Z8gŝR֧ׄh ߼Qfus'm%]Mgf9Pti lԘoD*zOQ"! ͛J7UY=)2Z'~)M|2g7)߷/\(5x1b6wdzEq"OK~! :Nc[wDn(^qY\meڮ#-]$߃4nx5B &C(:˟ ✈ܳ}h*žo V֣*rEEg[Nk[R%{v; x޼z.^ǎґ+{wlo%59._&y]{%jWwJ<.z"kiR(lzIlb0*\4kc9;zUN{`ʓ,sǂYgx@@'@``nW|Qi԰>=6l0M4Uo@HHuzckTr%9ڴic:t`!}YS{tJ<38ثmTPF i~yV:mv=Jcnj"m+V5CD7ޤY K,I#F #dw4~ė+bEQuޅ&ND{ZjNU1xu[Vl J>x`?tЖ #3R 6J/4zk,z s+[Fؿ1UO|Y885*fEs6ٿjՃ[_…ͭΡs Ԯڮkpe5#|Yh_8óͪo6p&X6g:ޞMv#Lj D=`惇޵xc %?' 
ا Y*S͘Eեjf'v%*ۤ 2%?c]aDzu*mC[e_y~?|Kz5r@?rN9H[Vf;Ppa5_~m 5i*[JAnKQJP7ޢiZl>kPIo[77nрƳ>zMK-[S'FѺӁ[+QR}_8!M{y''|cAӥ>e:du-ZRxzmL_7G߇VT:e?@MzsS;U]{_f1DF5kդ@oQԙ(z裏s>EX.Ɗ`iZ5kg_P |[qڵ嗟WuZd/_"gV 2e\DiQsQxH ܿS)VԊ,WE9 ]A9k)Ыw2̭hbt@B|"q@WiE@i9z2cG`y:yzzgͣ,Q1M_˕+h)::FOn[Ni3˖u>_tWSdfΘ.gϝSTXq >~|NOxf<޵bDFe~NjΝ&jL@r/>{wfɲe嗬{wIs>w(%LMw?/u)˙gc7嬧d1#CNSy7RS~I`栙iէ,~!Wxq0oh+<ǝ Jr}=>3gDkbx8J`)9̸q6VVZĥʔ ldϽcr_8Kb@ v=Nf`.yVI},Ɖ/!Be0=CgD&h9ד" O s&+(S.2"ӴRQ ZΛ&ʺᑳxOyE04_ERs>&M4y^:qG88p_9?&3"xcL8пT|ƚkBkR?ҏ,'_8'+P '*3f?8?޸5hl'G5'r}<vkb`@kꃗ~_4J%'"C6 ޕ#|=.yGflC#O_P+qv]|.fq<*D/`J^`Z/,A眶}9rP<%ıP~"~H^z-=;:= [^{ G?9B ޽ޘ\J80a )f*vD-ȷD`|iڴY1Y+VV&<>9 5i?K ŋQ.Dd!Ak׮'zemܯ3^RKҘ#e{1]_A1GvZJ+'oKDU3Ⱦ-XH3ߞFM5"7Jd$Oz\6' 4`cKlg*!wܮL'_ک4fjԸJO.Ҷ\mY\frѺ>7LHoƗ> l@Qov]~Vf l;b4guٻ(/C Z@:"Ml`\ ^+WkĂ J{BпNd3Ivyeg9h W?c]O:ܽ^Ӛf`?zfcwsY7Qπ哻n*"ipLVf5ۗug}^ /ks϶rg{@ў'MLP':,n[ i1N~7י( 99Y_ |5ty.oSW,a p6xѥNWC`^Ch!Sǩ 77EٿG`E+ 1c2>#֡)P,׀~h걩_6wq 7G;jY}إR~Q~?h7zχkٚ^mҌy,s89tP~}"q2gڞPe @ ҁ,r\]f>bzi>>,A^=2͹O#st=Y7|%ȫhrncNw'͛'X4ǭWWjPZs~`hֿjk <#A^]AS+NYbrh/:lI~Af̘*+A^С&%&qƭڎү%Y=xL]x?MچϜK 5 4 .mR`ΚjVe):-7UُI8ӢAXKD[qNqcźn^}^۶s3{33gfƉ?ՅcGVZO{2w;eSщQ&`SV{^c۹ߕRC#t[j[O3u}< fBOt`9jee;0Ch y/d\ڳ7M[E|ryGP,qZ4[qckQLwWnläKwūt2s'@H?;ȫ __cX_}U_GZ9~P,A^=]ftp MUլΝ.@?}Sh~QouX8=)C{yuZw'P)eu'f ;eȣt4?;81p/y:G#O!A=p;f:sʆUu073@(@7pYK.r8rOoY߄ A6ivY11fִAh |ւ\s ך޿-13]&sS0YΛp߀dށ5m=v8v \B~?B6jÅrXu:'U ^׭3ω?!Z; ?K'et[qq^ź~ݼ99ڋnrZFX{i^%Aĵnc-ھ֤O0= `;ͺd50Բu.go ghgg}, |̿Ayq;L.o;Om~WXif(,oǹ={l9}=orjΟA9H1- udzow(ڛ1P[RZGNK%|on~lMN:C.\$+WnuCW0sӦV|Ou}77r.dMd/ g ?fK ^w_=v鼽&e&h#Yj`4Ǭe&nnJcV+7mBnV>w`GQϱ^Kg >r6eIg}1׏zh3g=S2zs繧^UwýN8NkKv8zAbjLΦu njoʦǨJ@@^y^l:v"@_XmGO}ל1{Ejٵ;mEf5\wtߞ{ۯ} 8NaJ Yfd; 3}~.5w{Lw׭#}wa8#.?ȶ V@on"`}Orm|r{]ZG|\f`Μ>WOnɧn߮UUDZ6+VʤISemW{QV`n׮ݢh_P+';_]9iu 楺~jjfº6[{]Yq)AZtp۲ФF6+GtBNOQy+袩좣+:vP  d}p6ܚvQ+hs?e^'''MpcON7 \l ]בWݷu!ǫm{]mymh8:>`&lf@>쳳 s}u闑=Ƥ9lu`FKƍ^vi "JjOYd !a3(H.ﺀg! $@`ZeVMAEj6p|h?o2~3Wр}=,\p m.krmz\uee9@݁}2{ez34p8AΗ}QsNjdV`%==3_ŵv9 Zθ+MK>g u ¶`̯ 2.:SrȵN*:ZǽH LU~:კ流AOfD7/[=ZbX!?hzJqmz9jev9󹭷#V}{P9I{le٤ 2sb@8P9oRJ>; 8}2N:^p4UAGgaޖGUt=osܡط#dzPՙz3(?u9.E@ ?z`cڻF>̓yqlޒ([VўZ_m=tp6 2R>ԪUS~zpf[Yk/Z'nUuPX~Gu|EHj[n16X:`uZ~7}&Ï<h!]SS{"5]Zj7n,zKVr%iidҢ64QܷW ~m$Ճ{]c\#:ܳR|:.`zmy qv7ir:㮏4Ոyh tŝ[uҳTr};fFϞD3%@8{R\kGpn a.w?k|6Vq]^(Ԟ#>fAj5n"=6HlR<+^͓M&@CyI@žD`Ϣ 3=N:] G@p՟dzGj\-͚5~[\y5wMM991yTo[i(ҵۘlZW_*U(\Ӻմic^ʡUz(cN׶! izsxy[)懃k4ӣ7˚Y3D"Q[<[;]}^uMYǙ~rko[Mmtk;ݤSYG[i/gߎeop+;oؚYvK β^ @ws {l9T9~g3;Pў{d=3i K?=m打ؓrM;i lB{9yߣr ]^kp(E=j4}3^iӺsjokyy|:Xny~l? ?$**pqq2`@lus[~/zj:vY/bEzuʃkq}z;])*sprҵkg0errMmn:E0xξX8 ;`-K:thJO7wvm(̢ ݃<\v w;4`˜YS™75=.^z5RĀ=kf]{ x^ $ڌP[']f0^~e0ӮϹUi>wsYNfܶU4ФQ~V9٬@vRɣw+nڹoϫ,5ԑN-;L3(]fk+wvCM'h%f[s#qEfW_W+#?υa%_/kfΰ6dzNzյ٪ϴ rf[3#>u뭫V:Ӿ)7f߶>uz:} {Ϡ=);r9RT%\ʃ" P)pR5lOrm7[ҳɬYsM=ҦMkۤqFr8 tU? /+:H|>KYtlܸIʗ//]IpvҩSGIO?(y6?O=5Pvce4׮tkDYtg [{a ϗ-ː!5DlkP4iXs~Fwyrs^U|^S6Cs*lڛ]6tRRr+j(^|eŋH*UWG瞿]Z}9zujwܸ?XK~1R4e~`ܹLju 1@y^hJ}Ѯ-Z"sΗN,1ۯ,XĤokink_-ڛQfOH4h5>Ke:m!m9? 
I`:r[SJ KNX8lv|s>+,1LG$:l00qmMΩjGBm/e_=}ڎrxZHR{&H]3B$:>*$ [W@: u Z͎2@y G-4:!Y"WRh޳GGBp _&Pm@>7R8DB(=fߘs[f޹sP Iϒ>K`7:N^L>|j޼Gfm۶w lǰ) -SN~.+V=R~;~Usg]8]*Pg)?mۼg)<<\?ŭ8INONq&8odbgg eQF$@$@$@$=  #R{W\IH8bn Q XtueݐX&8zg6э|JM׻mKyQiŴI[[ڎhK 96"8jze珻HZ&_ي]P|C29U'Ν=U=3o珯9VH"Y ,ȏ'NDx~ =ET/Ν &86L/IrIR6ch,0>.Q 2Bi928d6,+SjҤn-õvo~#ZZ[޼HI& UKH>xNj9xǎd$hzG{x PF$@$@$@$} 1^X1KH,"D.iB'C'1R۠OnDC/C)%rX¥'fKixIops=MZY">Ya3} եs{$tݥVNc8|A.hjZ.[u^Xpasgoę3guqÑ թruӧ.M!A7!j_l$D;2?+VNdæMEW$ୋ֭Շ0bSvoŸ?1[GCG#   9y4tsՏ=M}; R;e%2ǺNS,)/bMugͷ󵦠Z;oHlg?Mˉ_Mt A4G>3?n%Ne$N3ϘREڭLm7.ˡCVpx38Mds=Onp 5{ g)SK4] AJ\j;l3;*l(u:_RW Wi#\ s#@"[ һϓ;} [ c?+VqJ`AdWwl#WlJ i>86_߲%u?2aqWs :)LxC:;eZ*j@ή'+rG X8+2@-A41ΝQjX)wL]xAMWQѻe$V%h*'dz2.O OenHkh7Ue˖/x7|)9,"ۀט$K%8>'5~8ڮSNQ7n]Ζ.#?R?'    L/^|Y} utAlA=-9fgØ2׳6"p$˪X!ܩ  wBӦN֎;" P 0}("~Ǿ8jF4-cQb#^.?H}Lޤ- SN93?SMH  "'^Zj~D+mxcgu~ihƪY=H Ƽzv`_*`Y_jɆڵO"OlՠA}=ph㾟6}H.%T'N_wKtԞ7`ANah#_򛧝0례]Df yQoZӴ]j jeq&V~[+Uֺk׮GYb* K"Ag 8z5CկSVwخFǡO~p @h,^zgc9n$7&E$ 2tzi0,>n̳\~˘)MZM7T s]ЫtrG {Dc^;jO.sG?[ޤ?.e韓$lFǡ0셃'|gI|[o9].p"'/ۃ|Gzg'zRG^l 5\Ǿ:N!xcۄw>Pʪy OϪ?$[ >l9Nݏ#ST=H^ *S)r‘CR7\O =YHb_e28 +~ s2;a~r( s^;9l-8"mI65\Bfq`F<"JLdk(߀/5::Jw0h^s%p]C?H}٧:) jذYSRrLEMrW'/ν{^ա d38m[ZDuܫpcGI1gD=y ڸ"fV[Q 2\?C0f_L=7\G>5i yz=>OOg)'/w=gSTZEAgSu~G$3%yӿ'݅8 Qpc@SժV]U1QH+\4Kw޸knzO.m܎ܲeDު oQ$8ڪU n:]owmpz{>}T~QuYo%)eXX)م6Ж'{݉jؗ="wztXJ&MnM 0P/7>c&   H hb0"IJG%S^n! {XX&#੟s'Q\Yņ8thJrIRHJ崞& H` DVd*!H5z^L->sW+Dcڷc*"H,Ѕ!}r8![=fͳB[AkRwܔ^ .U-﷔X-m։==o*t. @C)A`+bŊTNyռjYǀ"H.s9{!^dlC.fK໪Q:(#`o#E!^qX]e:ܾ}'ы@8]5F ˝Z$@AvgK=?'{4lxkV>A     A]DVh[h㓖$>E}oGDzXZD> 'S;eEfN#r70J@0]LO,2 8pHmذI0yE+ΝH]ImD |O'U iDvpbPrkF=$֌vɡb-=x0.]bXǎQf;%zgΜ>Q+oJ#&5Bn^oܸC2KG/^\,%Rj߹yHHHHH g4[6r.y:\!Nsv:j9r:^'h=vL,%YE22q I+_+'nQ`>8dCר2eJir[PDOQQѻԾZ!賥7#Θ8սhѢe˻>%d CrJ"CGHHHHH xMk-pQ]TIz:"󾦣7eA`@*D.THe/˂N4=p3r6]cDB{I8DjX U2.A?q" E-cw$^ms:r8N'e˝GI7D é ]_[79 … Բem۶ a      x~ `W~@N"n];$d =!I';Y Dp"A{ͣ%4JJnٲ%OU.ܽB$w\re`_&'Q[~ݵ| 6ҏSޅwذ` ,F$@$@$@$@$@)#E)752#6d3wղ嫜݁TJ&`P>M5Lϭ^PG-f+;yH.VEtoJ$Nn[En*^"}͋dkNԗLGo_ɓ'Tݺ5-f쳃_6XHHHHH?DN1۹ ΁V=|꠲p:!,E` _TEхHN²vRIߢ{:-1E%Y'$W;u괖f($(%qI_ǎDnOsIENDB`python-elasticsearch-9.1.1/docs/reference/index.md000066400000000000000000000060421506101734100221720ustar00rootroot00000000000000--- mapped_pages: - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/index.html - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/overview.html --- # Python [overview] This is the official Python client for {{es}}. Its goal is to provide common ground for all {{es}}-related code in Python. For this reason, the client is designed to be unopinionated and extendable. API reference documentation for this client is available on [Read the Docs](https://elasticsearch-py.readthedocs.io). ## Example use [_example_use] Simple use-case: ```python >>> from datetime import datetime >>> from elasticsearch import Elasticsearch # Connect to 'http://localhost:9200' >>> client = Elasticsearch("http://localhost:9200") # Datetimes will be serialized: >>> client.index(index="my-index-000001", id=42, document={"any": "data", "timestamp": datetime.now()}) {'_id': '42', '_index': 'my-index-000001', '_type': 'test-type', '_version': 1, 'ok': True} # ...but not deserialized >>> client.get(index="my-index-000001", id=42)['_source'] {'any': 'data', 'timestamp': '2013-05-12T19:45:31.804229'} ``` ::::{tip} For an elaborate example of how to ingest data into Elastic Cloud, refer to [this page](docs-content://manage-data/ingest/ingesting-data-from-applications/ingest-data-with-python-on-elasticsearch-service.md). 
## Elasticsearch Python DSL [_elasticsearch_python_dsl]

For higher-level access with a more limited scope, have a look at the DSL module, which provides a more convenient and idiomatic way to write and manipulate queries.

## Compatibility [_compatibility]

Language clients are _forward compatible:_ each client version works with equivalent and later minor versions of {{es}} without breaking.

Compatibility does not imply full feature parity. New {{es}} features are supported only in equivalent client versions. For example, an 8.12 client fully supports {{es}} 8.12 features and works with 8.13 without breaking; however, it does not support new {{es}} 8.13 features. An 8.13 client fully supports {{es}} 8.13 features.

| Elasticsearch version | elasticsearch-py branch |
| --- | --- |
| main | main |
| 9.x | 9.x |
| 9.x | 8.x |
| 8.x | 8.x |

{{es}} language clients are also _backward compatible_ across minor versions — with default distributions and without guarantees.

:::{tip}
To upgrade to a new major version, first upgrade {{es}}, then upgrade the Python {{es}} client.
:::

If you need to work with multiple client versions, note that older versions are also released as `elasticsearch7` and `elasticsearch8`.
python-elasticsearch-9.1.1/docs/reference/installation.md000066400000000000000000000013251506101734100235630ustar00rootroot00000000000000
---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/installation.html
---

# Installation [installation]

[Download the latest version of Elasticsearch](https://www.elastic.co/downloads/elasticsearch) or [sign-up](https://cloud.elastic.co/registration?elektra=en-ess-sign-up-page) for a free trial of Elastic Cloud.

The Python client for {{es}} can be installed with pip:

```sh
$ python -m pip install elasticsearch
```

If your application uses async/await in Python you can install with the `async` extra:

```sh
$ python -m pip install elasticsearch[async]
```

Read more about [how to use asyncio with this project](https://elasticsearch-py.readthedocs.io/en/master/async.html).
python-elasticsearch-9.1.1/docs/reference/integrations.md000066400000000000000000000054421506101734100235740ustar00rootroot00000000000000
---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/integrations.html
---

# Integrations [integrations]

You can find integration options and information on this page.

## OpenTelemetry instrumentation [opentelemetry-intro]

The Python Elasticsearch client supports native OpenTelemetry instrumentation following the [OpenTelemetry Semantic Conventions for Elasticsearch](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/). Refer to the [Using OpenTelemetry](/reference/opentelemetry.md) page for details.

## ES|QL [esql-intro]

[ES|QL](docs-content://explore-analyze/query-filter/languages/esql.md) is available through the Python Elasticsearch client. Refer to the [ES|QL and Pandas](/reference/esql-pandas.md) page to learn more about using ES|QL and Pandas together with dataframes.
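As a minimal, hedged illustration (the index name is a placeholder and a reachable cluster is assumed), an ES|QL query can be sent directly through the client; the JSON response contains `columns` and `values`:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")

# Run an ES|QL query and print each result row.
resp = client.esql.query(query="FROM my-index-000001 | LIMIT 5")
for row in resp["values"]:
    print(row)
```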
## Transport [transport]

Connections, retries, and pooling are handled by the [Elastic Transport Python](https://github.com/elastic/elastic-transport-python) library. Documentation on the low-level classes is available on [Read the Docs](https://elastic-transport-python.readthedocs.io).

## Tracking requests with Opaque ID [opaque-id]

You can enrich your requests against Elasticsearch with an identifier string that allows you to discover this identifier in [deprecation logs](docs-content://deploy-manage/monitor/logging-configuration/update-elasticsearch-logging-levels.md#deprecation-logging), to support you with [identifying search slow log origin](elasticsearch://reference/elasticsearch/index-settings/slow-log.md), or to help with [identifying running tasks](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-tasks).

The opaque ID can be set via the `opaque_id` parameter of the client `.options()` method:

```python
client = Elasticsearch(...)
client.options(opaque_id="request-id-...").search(...)
```

## Type Hints [type-hints]

Starting in `elasticsearch-py` v7.10.0, the library ships with [type hints](https://www.python.org/dev/peps/pep-0484) and supports basic static type analysis with tools like [Mypy](http://mypy-lang.org) and [Pyright](https://github.com/microsoft/pyright).

If we write a script that has a type error like using `request_timeout` with a `str` argument instead of `float` and then run Mypy on the script:

```python
# script.py
from elasticsearch import Elasticsearch

client = Elasticsearch(...)
client.options(
    request_timeout="5"  # type error!
).search(...)

# $ mypy script.py
# script.py:5: error: Argument "request_timeout" to "search" of "Elasticsearch" has
#              incompatible type "str"; expected "Union[int, float, None]"
# Found 1 error in 1 file (checked 1 source file)
```

Type hints also allow tools like your IDE to check types and provide better auto-complete functionality.
python-elasticsearch-9.1.1/docs/reference/opentelemetry.md000066400000000000000000000143031506101734100237560ustar00rootroot00000000000000
---
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/opentelemetry.html
---

# Using OpenTelemetry [opentelemetry]

You can use [OpenTelemetry](https://opentelemetry.io/) to monitor the performance and behavior of your {{es}} requests through the Elasticsearch Python client. The Python client comes with built-in OpenTelemetry instrumentation that emits [distributed tracing spans](docs-content://solutions/observability/apm/traces-ui.md) by default. With that, applications using [manual OpenTelemetry instrumentation](https://www.elastic.co/blog/manual-instrumentation-of-python-applications-opentelemetry) or [automatic OpenTelemetry instrumentation](https://www.elastic.co/blog/auto-instrumentation-of-python-applications-opentelemetry) are enriched with additional spans that contain insightful information about the execution of the {{es}} requests.

The native instrumentation in the Python client follows the [OpenTelemetry Semantic Conventions for {{es}}](https://opentelemetry.io/docs/specs/semconv/database/elasticsearch/). In particular, the instrumentation in the client covers the logical layer of {{es}} requests. A single span per request is created that is processed by the service through the Python client. The following image shows a trace that records the handling of two different {{es}} requests: an `info` request and a `search` request.
% TO DO: Use `:class: screenshot`
![Distributed trace with Elasticsearch spans](images/otel-waterfall-without-http.png)

Usually, OpenTelemetry auto-instrumentation modules come with instrumentation support for HTTP-level communication. In this case, in addition to the logical {{es}} client requests, spans will be captured for the physical HTTP requests emitted by the client. The following image shows a trace with both {{es}} spans (in blue) and the corresponding HTTP-level spans (in red) after having installed the ``opentelemetry-instrumentation-urllib3`` package:

% TO DO: Use `:class: screenshot`
![Distributed trace with Elasticsearch spans](images/otel-waterfall-with-http.png)

Advanced Python client behavior such as node round-robin and request retries is revealed through the combination of logical {{es}} spans and the physical HTTP spans. The following example shows a `search` request in a scenario with two nodes:

% TO DO: Use `:class: screenshot`
![Distributed trace with Elasticsearch spans](images/otel-waterfall-retry.png)

The first node is unavailable and results in an HTTP error, while the retry to the second node succeeds. Both HTTP requests are subsumed by the logical {{es}} request span (in blue).

### Setup the OpenTelemetry instrumentation [_setup_the_opentelemetry_instrumentation]

When using the [manual Python OpenTelemetry instrumentation](https://opentelemetry.io/docs/languages/python/instrumentation/) or the [OpenTelemetry Python agent](https://opentelemetry.io/docs/languages/python/automatic/), the Python client’s OpenTelemetry instrumentation is enabled by default and uses the global OpenTelemetry SDK with the global tracer provider.

If you’re getting started with OpenTelemetry instrumentation, the following blog posts have step-by-step instructions to ingest and explore tracing data with the Elastic stack:

* [Manual instrumentation with OpenTelemetry for Python applications](https://www.elastic.co/blog/manual-instrumentation-of-python-applications-opentelemetry)
* [Automatic instrumentation with OpenTelemetry for Python applications](https://www.elastic.co/blog/auto-instrumentation-of-python-applications-opentelemetry)

## Comparison with community instrumentation [_comparison_with_community_instrumentation]

The [community OpenTelemetry Elasticsearch instrumentation](https://opentelemetry-python-contrib.readthedocs.io/en/latest/instrumentation/elasticsearch/elasticsearch.html) also instruments the client and sends OpenTelemetry traces, but it was developed before the OpenTelemetry Semantic Conventions for {{es}}, so its trace attributes are inconsistent with other OpenTelemetry Elasticsearch client instrumentations. To avoid tracing the same requests twice, make sure to use only one instrumentation, either by uninstalling the opentelemetry-instrumentation-elasticsearch Python package or by [disabling the native instrumentation](#opentelemetry-config-enable).

### Configuring the OpenTelemetry instrumentation [_configuring_the_opentelemetry_instrumentation]

You can configure this OpenTelemetry instrumentation through environment variables. The following configuration options are available.

#### Enable / Disable the OpenTelemetry instrumentation [opentelemetry-config-enable]

With this configuration option you can enable (default) or disable the built-in OpenTelemetry instrumentation.
**Default:** `true`

| | |
| --- | --- |
| Environment Variable | `OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED` |

#### Capture search request bodies [_capture_search_request_bodies]

By default, the built-in OpenTelemetry instrumentation does not capture request bodies due to data privacy considerations. You can use this option to enable capturing of search queries from the request bodies of {{es}} search requests in case you wish to gather this information regardless. The options are to capture the raw search query or not capture it at all.

**Default:** `omit`

**Valid Options:** `omit`, `raw`

| | |
| --- | --- |
| Environment Variable | `OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY` |
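As a minimal sketch of applying these options (this assumes the variables are in place before the client is instantiated; in practice you would usually export them from your shell or process manager rather than set them in code):

```python
import os

# Assumption: these variables are read when the Elasticsearch client is created.
# Disable the built-in OpenTelemetry instrumentation entirely:
os.environ["OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED"] = "false"

# Or keep it enabled and capture raw search queries instead (shown commented out,
# since it only applies when the instrumentation is enabled):
# os.environ["OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY"] = "raw"

from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")
```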
### Overhead [_overhead]

The OpenTelemetry instrumentation (as any other monitoring approach) may come with a slight overhead on CPU, memory, and/or latency. The overhead may only occur when the instrumentation is enabled (default) and an OpenTelemetry SDK is active in the target application. When the instrumentation is disabled or no OpenTelemetry SDK is active within the target application, monitoring overhead is not expected when using the client.

Even in cases where the instrumentation is enabled and is actively used (by an OpenTelemetry SDK), the overhead is minimal and negligible in the vast majority of cases. In edge cases where there is a noticeable overhead, the [instrumentation can be explicitly disabled](#opentelemetry-config-enable) to eliminate any potential impact on performance.
python-elasticsearch-9.1.1/docs/reference/querying.md000066400000000000000000000104271506101734100227300ustar00rootroot00000000000000
# Querying

The Python Elasticsearch client provides several ways to send queries to Elasticsearch. This document explains the details of how to construct and execute queries using the client. This document does not cover the DSL module.

## From API URLs to function calls

Elasticsearch APIs are grouped by namespaces.

* There's the global namespace, with APIs like the Search API (`GET _search`) or the Index API (`PUT /<target>/_doc/<_id>` and related endpoints).
* Then there are all the other namespaces, such as:
  * Indices with APIs like the Create index API (`PUT /my-index`),
  * ES|QL with the Run an ES|QL query API (`POST /_query`),
  * and so on.

As a result, when you know which namespace and function you need, you can call the function. Assuming that `client` is an Elasticsearch instance, here is how you would call the examples from above:

* Global namespace: `client.search(...)` and `client.index(...)`
* Other namespaces:
  * Indices: `client.indices.create(...)`
  * ES|QL: `client.esql.query(...)`

How can you figure out the namespace?

* The [Elasticsearch API docs](https://www.elastic.co/docs/api/doc/elasticsearch/) can help, even though the tags it uses do not fully map to namespaces.
* You can also use the client documentation, by:
  * browsing the [Elasticsearch API Reference](https://elasticsearch-py.readthedocs.io/en/stable/api.html) page, or
  * searching for your endpoint using [Read the Docs](https://elasticsearch-py.readthedocs.io/) search, which is powered by Elasticsearch!
* Finally, for Elasticsearch 8.x, most examples in the [Elasticsearch guide](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) are also available in Python. (This is still a work in progress for Elasticsearch 9.x.)

In the example below, `client.ingest.put_pipeline(...)` is the function that calls the "Create or update a pipeline" API.

![Python code example in the Elasticsearch guide](images/python-example.png)

## Parameters

Now that you know which functions to call, the next step is parameters. To avoid ambiguity, the Python Elasticsearch client mandates keyword arguments. To give an example, let's look at the ["Create an index" API](https://elasticsearch-py.readthedocs.io/en/stable/api/indices.html#elasticsearch.client.IndicesClient.create). There's only one required parameter, `index`, so the minimal form looks like this:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200", api_key="...")

client.indices.create(index="my-index")
```

You can also use other parameters, including the first level of body parameters, such as:

```python
resp = client.indices.create(
    index="logs",
    aliases={"logs-alias": {}},
    mappings={"name": {"type": "text"}},
)
print(resp)
```

In this case, the client will send to Elasticsearch the following JSON body:

```console
PUT /logs
{
  "aliases": {"logs-alias": {}},
  "mappings": {"name": {"type": "text"}}
}
```

## Unknown parameters or APIs

Like other clients, the Python Elasticsearch client is generated from the [Elasticsearch specification](https://github.com/elastic/elasticsearch-specification). While we strive to keep it up to date, it is not (yet!) perfect, and sometimes body parameters are missing. In this case, you can specify the body directly, as follows:

```python
resp = client.indices.create(
    index="logs",
    body={
        "aliases": {"logs-alias": {}},
        "mappings": {"name": {"type": "text"}},
        "missing_parameter": "foo",
    }
)
print(resp)
```

In the event that an API is missing, you need to use the low-level `perform_request` function:

```python
resp = client.perform_request(
    "PUT",
    "/logs",
    headers={"content-type": "application/json", "accept": "application/json"},
    body={
        "aliases": {"logs-alias": {}},
        "mappings": {"name": {"type": "text"}},
        "missing_parameter": "foo",
    }
)
print(resp)
```

One benefit of this function is that it lets you use arbitrary headers, such as the `es-security-runas-user` header used to [impersonate users](https://www.elastic.co/guide/en/elasticsearch/reference/current/run-as-privilege.html).

## Options

You can specify options such as request timeouts or retries using the `.options()` API; see the [Configuration](./configuration.md) page for details. A brief sketch follows below.
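A hedged sketch of per-request options (the parameter values shown are illustrative placeholders, not recommendations):

```python
resp = (
    client.options(request_timeout=5, max_retries=3, retry_on_timeout=True)
    .search(index="logs", query={"match_all": {}})
)
print(resp["hits"]["total"])
```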
python-elasticsearch-9.1.1/docs/reference/toc.yml000066400000000000000000000010601506101734100220450ustar00rootroot00000000000000
toc:
  - file: index.md
  - file: getting-started.md
  - file: installation.md
  - file: connecting.md
  - file: configuration.md
  - file: querying.md
  - file: esql-query-builder.md
  - file: async.md
  - file: integrations.md
    children:
      - file: opentelemetry.md
      - file: esql-pandas.md
  - file: examples.md
  - file: elasticsearch-dsl.md
    children:
      - file: dsl_configuration.md
      - file: dsl_tutorials.md
      - file: dsl_how_to_guides.md
      - file: dsl_examples.md
      - file: dsl_migrating.md
  - file: client-helpers.md
python-elasticsearch-9.1.1/docs/release-notes/000077500000000000000000000000001506101734100213475ustar00rootroot00000000000000
python-elasticsearch-9.1.1/docs/release-notes/breaking-changes.md000066400000000000000000000055211506101734100250640ustar00rootroot00000000000000
---
navigation_title: "Breaking changes"
---

# Elasticsearch Python Client breaking changes [elasticsearch-python-client-breaking-changes]

Breaking changes can impact your Elastic applications, potentially disrupting normal operations. Before you upgrade, carefully review the Elasticsearch Python Client breaking changes and take the necessary steps to mitigate any issues. To learn how to upgrade, check [Upgrade](docs-content://deploy-manage/upgrade.md).

% ## Next version [elasticsearch-python-client-nextversion-breaking-changes]

% ::::{dropdown} Title of breaking change
% Description of the breaking change.
% For more information, check [PR #](PR link).

% **Impact**
% Impact of the breaking change.

% **Action**
% Steps for mitigating the breaking change impact.

% ::::

## 9.0.0 [elasticsearch-python-client-900-breaking-changes]

::::{dropdown} Remove deprecated Elasticsearch() options
The `timeout`, `randomize_hosts`, `host_info_callback`, `sniffer_timeout`, `sniff_on_connection_fail` and `maxsize` parameters were deprecated in elasticsearch-py 8.0 and are now removed from `Elasticsearch.__init__()`.

For more information, check [PR #2840](https://github.com/elastic/elasticsearch-py/pull/2840).

**Impact**
These parameters were removed in favor of more descriptive versions. Using any of these parameters will prevent instantiating the Elasticsearch client.

**Action**
These parameters can be replaced as follows (see the sketch below):

* `timeout` is now `request_timeout`
* `randomize_hosts` is now `randomize_nodes_in_pool`
* `host_info_callback` is now `sniffed_node_callback`
* `sniffer_timeout` is now `min_delay_between_sniffing`
* `sniff_on_connection_fail` is now `sniff_on_node_failure`
* `maxsize` is now `connections_per_node`
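A minimal before/after sketch (the host URL and parameter values are placeholders, not recommendations):

```python
from elasticsearch import Elasticsearch

# elasticsearch-py 8.x (deprecated options, rejected in 9.0):
# client = Elasticsearch("http://localhost:9200", timeout=30, maxsize=10)

# elasticsearch-py 9.x equivalents:
client = Elasticsearch(
    "http://localhost:9200",
    request_timeout=30,       # was `timeout`
    connections_per_node=10,  # was `maxsize`
)
```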
::::

::::{dropdown} Remove deprecated url_prefix and use_ssl host keys
When instantiating a new client, `hosts` can be specified as a dictionary. The `url_prefix` and `use_ssl` keys are no longer allowed.

For more information, check [PR #2797](https://github.com/elastic/elasticsearch-py/pull/2797).

**Impact**
Using any of these parameters will prevent instantiating the Elasticsearch client.

**Action**
The parameters can be replaced as follows (see the sketch below):

* `use_ssl` isn't needed, as a scheme is required since elasticsearch-py 8.0 (`http` or `https`)
* `url_prefix` should be replaced with `path_prefix`, which is more descriptive. This functionality allows you to deploy Elasticsearch under a specific path, such as `http://host:port/path/to/elasticsearch`, instead of the default root path (`http://host:port/`)
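An illustrative sketch (host, port, and path are placeholders); the same deployment can be described either with a full URL or with a node dictionary that uses `path_prefix`:

```python
from elasticsearch import Elasticsearch

# The scheme in the URL replaces `use_ssl`, and the URL path replaces `url_prefix`:
client = Elasticsearch("https://host:9200/path/to/elasticsearch")

# Equivalent node dictionary form:
client = Elasticsearch(
    [{"host": "host", "port": 9200, "scheme": "https", "path_prefix": "/path/to/elasticsearch"}]
)
```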
::::

::::{dropdown} Remove APIs
Elasticsearch 9 removed the kNN search and Unfreeze index APIs.

**Action**
* The kNN search API has been replaced by the `knn` option in the search API since Elasticsearch 8.4, as sketched below.
* The Unfreeze index API was deprecated in Elasticsearch 7.14 and has been removed in Elasticsearch 9.
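A hedged sketch of the `knn` option (the index name, field, and vector are placeholders):

```python
resp = client.search(
    index="my-vectors",
    knn={
        "field": "embedding",
        "query_vector": [0.1, 0.2, 0.3],
        "k": 10,
        "num_candidates": 100,
    },
)
```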
::::
python-elasticsearch-9.1.1/docs/release-notes/deprecations.md000066400000000000000000000022621506101734100243530ustar00rootroot00000000000000
---
navigation_title: "Deprecations"
---

# Elasticsearch Python Client deprecations [elasticsearch-python-client-deprecations]

Over time, certain Elastic functionality becomes outdated and is replaced or removed. To help with the transition, Elastic deprecates functionality for a period before removal, giving you time to update your applications.

Review the deprecated functionality for Elasticsearch Python Client. While deprecations have no immediate impact, we strongly encourage you to update your implementation after you upgrade. To learn how to upgrade, check out [Upgrade](docs-content://deploy-manage/upgrade.md).

% ## Next version [elasticsearch-python-client-versionnext-deprecations]

% ::::{dropdown} Deprecation title
% Description of the deprecation.
% For more information, check [PR #](PR link).

% **Impact**
% Impact of deprecation.

% **Action**
% Steps for mitigating deprecation impact.

% ::::

% ## 9.0.0 [elasticsearch-python-client-900-deprecations]

% ::::{dropdown} Deprecation title
% Description of the deprecation.
% For more information, check [PR #](PR link).

% **Impact**
% Impact of deprecation.

% **Action**
% Steps for mitigating deprecation impact.

% ::::
python-elasticsearch-9.1.1/docs/release-notes/index.md000066400000000000000000000213341506101734100230030ustar00rootroot00000000000000
---
navigation_title: "Elasticsearch Python Client"
mapped_pages:
  - https://www.elastic.co/guide/en/elasticsearch/client/python-api/current/release-notes.html
---

# Elasticsearch Python Client release notes [elasticsearch-python-client-release-notes]

Review the changes, fixes, and more in each version of Elasticsearch Python Client. To check for security updates, go to [Security announcements for the Elastic stack](https://discuss.elastic.co/c/announcements/security-announcements/31).

% Release notes include only features, enhancements, and fixes. Add breaking changes, deprecations, and known issues to the applicable release notes sections.

% ## version.next [elasticsearch-python-client-next-release-notes]

% ### Features and enhancements [elasticsearch-python-client-next-features-enhancements]
% *

% ### Fixes [elasticsearch-python-client-next-fixes]

## 9.1.1 (2025-09-11)

* ES|QL query builder integration with the DSL module ([#3058](https://github.com/elastic/elasticsearch-py/pull/3058))
* ES|QL query builder robustness fixes ([#3017](https://github.com/elastic/elasticsearch-py/pull/3017))
* Fix ES|QL `multi_match()` signature ([#3052](https://github.com/elastic/elasticsearch-py/pull/3052))

API

* Add support for ES|QL query builder objects to ES|QL Query and Async Query APIs
* Add Transform Set Upgrade Mode API
* Fix type of `fields` parameter of Term Vectors API to array of strings
* Fix type of `params` parameter of SQL Query API to array

DSL

* Preserve the `skip_empty` setting in `to_dict()` recursive serializations ([#3041](https://github.com/elastic/elasticsearch-py/pull/3041))
* Add `separator_group` and `separators` attributes to `ChunkingSettings` type
* Add `primary` attribute to `ShardFailure` type
* Fix type of `key` attribute of `ArrayPercentilesItem` to float

## 9.1.0 (2025-07-30)

Enhancements

* ES|QL query builder (technical preview) ([#2997](https://github.com/elastic/elasticsearch-py/pull/2997))
* Update OpenTelemetry conventions ([#2999](https://github.com/elastic/elasticsearch-py/pull/2999))
* Add option to disable accurate reporting of file and line location in warnings (Fixes #3003) ([#3006](https://github.com/elastic/elasticsearch-py/pull/3006))

APIs

* Remove `if_primary_term`, `if_seq_no` and `op_type` from Create API
* Remove `master_timeout` from Ingest Get Ip Location Database API
* Remove `application`, `priviledge` and `username` from the Security Get User API
* Rename `type_query_string` to `type` in License Post Start Trial API
* Add `require_data_stream` to Index API
* Add `settings_filter` to Cluster Get Component Template API
* Add `cause` to Cluster Put Component Template API
* Add `master_timeout` to Cluster State API
* Add `ccs_minimize_roundtrips` to EQL Search API
* Add `keep_alive` and `keep_on_completion` to ES|QL Async Query API
* Add `format` to ES|QL Async Query Get API
* Add ES|QL Get Query and List Queries APIs
* Add Indices Delete Data Stream Options API
* Add Indices Get Data Stream Options and Put Data Stream Options APIs
* Add Indices Get Data Stream Settings and Put Data Stream Settings APIs
* Add `allow_no_indices`, `expand_wildcards` and `ignore_available` to Indices Recovery API
* Add Indices Remove Block API
* Add Amazon Sagemaker to Inference API
* Add `input_type` to Inference API
* Add `timeout` to all Inference Put APIs
* Add Inference Put Custom API
* Add Inference Put DeepSeek API
* Add `task_settings` to Put HuggingFace API
* Add `refresh` to Security Grant API Key API
* Add `wait_for_completion` to the Snapshot Delete API
* Add `state` to Snapshot Get API
* Add `refresh` to Synonyms Put Synonym, Put Synonym Rule and Delete Synonym Rule APIs

DSL

* Handle lists in `copy_to` option in DSL field declarations correctly (Fixes #2992) ([#2993](https://github.com/elastic/elasticsearch-py/pull/2993))
* Add `index_options` to SparseVector type
* Add SparseVectorIndexOptions type
* Add `key` to FiltersBucket type

Other changes

* Drop support for Python 3.8 ([#3001](https://github.com/elastic/elasticsearch-py/pull/3001))

## 9.0.2 (2025-06-05) [elasticsearch-python-client-902-release-notes]

### DSL

* Add `rescore_vector` to `DenseVectorIndexOptions`

## 9.0.1 (2025-04-28) [elasticsearch-python-client-901-release-notes]

### Features and enhancements [elasticsearch-python-client-901-features-enhancements]

* Surface caused_by in ApiError ([#2932](https://github.com/elastic/elasticsearch-py/pull/2932))
* Clarify Elasticsearch 9.x compatibility ([#2928](https://github.com/elastic/elasticsearch-py/pull/2928))
* Reorganize Sphinx docs to only include reference pages ([#2776](https://github.com/elastic/elasticsearch-py/pull/2776))

## 9.0.0 (2025-04-15) [elasticsearch-python-client-900-release-notes]

:::{tip}
Upgrade to Elasticsearch 9 before using elasticsearch-py 9.0.0 or later. Using elasticsearch-py 9.0.0 on an Elasticsearch 8 server will fail. Since language clients are forward-compatible, you should first upgrade Elasticsearch, then the Elasticsearch client. See the [compatibility documentation](/reference/index.md#_compatibility) for more details.
:::

### Breaking changes

* Remove deprecated `Elasticsearch()` options ([#2840](https://github.com/elastic/elasticsearch-py/pull/2840))
* Remove deprecated `url_prefix` and `use_ssl` options ([#2797](https://github.com/elastic/elasticsearch-py/pull/2797))

See the [breaking changes page](breaking-changes.md) for more details.
### Enhancements

* Merge [`elasticsearch-dsl-py`](https://github.com/elastic/elasticsearch-dsl-py/) package ([#2736](https://github.com/elastic/elasticsearch-py/pull/2736))
* Add Python DSL documentation ([#2761](https://github.com/elastic/elasticsearch-py/pull/2761))
* Autogenerate DSL field classes from schema ([#2780](https://github.com/elastic/elasticsearch-py/pull/2780))
* Improve DSL documentation examples with class-based queries and type hints ([#2857](https://github.com/elastic/elasticsearch-py/pull/2857))
* Document the use of `param()` in Python DSL methods ([#2861](https://github.com/elastic/elasticsearch-py/pull/2861))
* Migrate documentation from AsciiDoc to Markdown format ([#2806](https://github.com/elastic/elasticsearch-py/pull/2806))
* Document use of sub-clients ([#2798](https://github.com/elastic/elasticsearch-py/pull/2798))
* Document how to make API calls ([#2843](https://github.com/elastic/elasticsearch-py/pull/2843))
* Fix `simulate` sub-client documentation ([#2749](https://github.com/elastic/elasticsearch-py/pull/2749))

### APIs

* Remove deprecated `/_knn_search` API
* Remove Unfreeze an index API
* Remove `min_compatible_shard_node` from Search and Async Search Submit APIs
* Remove local parameter from cat alias, Alias exists, and Get alias APIs
* Remove `verbose` from Index segments API
* Remove `include_model_definition` from Get trained model configuration info API
* Remove `wait_for_active_shards` from experimental Get field usage stats API
* Support soft-deletes in connectors:
  * Add `hard` to Delete connector API
  * Add `include_deleted` to Get and List Connector APIs
* Add `master_timeout` to Migrate to data tiers routing APIs
* Add `master_timeout` to the Alias exists and Get alias APIs.
* Add `expand_wildcards` to Create snapshot API
* Rename incorrect `access_token` to `token` in Logout of OpenID Connect API
* Add inference APIs: Alibaba Cloud AI Search, Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI, Cohere, Elastic Inference Service (EIS), Elasticsearch, ELSER, Google AI Studio, Google Vertex AI, Hugging Face, Jina AI, Mistral, OpenAI, and Voyage AI
* Add Elastic Inference Service (EIS) chat completion API
* Add Reindex legacy backing indices APIs
* Add Create an index from a source index API
* Add `include_source_on_error` to Create, Index, Update and Bulk APIs
* Add Stop async ES|QL query API
* Add `timeout` to Resolve Cluster API
* Add `adaptive_allocations` body field to Start and Update a trained model deployment API
* Rename `index_template_subtitutions` to `index_template_substitutions` in Simulate data ingestion API
* Add `if_primary_term`, `if_seq_no`, `op_type`, `require_alias` and `require_data_stream` to Create API
* Add `max_concurrent_shard_requests` to Open point in time API
* Add `local` and `flat_settings` to Check index templates API
* Add `reopen` to Update index settings API
* Add `resource` to Reload search analyzer API
* Add `lazy` to Roll over to a new index API
* Add `cause` and `create` to Simulate index template APIs
* Add inference APIs: Alibaba Cloud AI Search, Amazon Bedrock, Anthropic, Azure AI Studio, Azure OpenAI, Cohere, Elasticsearch, ELSER, Google AI Studio, Google Vertex AI, Hugging Face, Jina AI, Mistral, OpenAI, and Voyage AI

### DSL

* Add `ignore_malformed`, `script`, `on_script_error` and `time_series_dimension` to Boolean field
* Add `index` to GeoShape field
* Add `search_inference_id` to SemanticText field
python-elasticsearch-9.1.1/docs/release-notes/known-issues.md000066400000000000000000000006711506101734100243420ustar00rootroot00000000000000
---
navigation_title: "Known issues"
---

# Elasticsearch Python Client known issues [elasticsearch-python-client-known-issues]

% Use the following template to add entries to this page.

% :::{dropdown} Title of known issue
% **Details**
% On [Month/Day/Year], a known issue was discovered that [description of known issue].

% **Workaround**
% Workaround description.

% **Resolved**
% On [Month/Day/Year], this issue was resolved.

% :::
python-elasticsearch-9.1.1/docs/release-notes/toc.yml000066400000000000000000000001511506101734100226540ustar00rootroot00000000000000
toc:
  - file: index.md
  - file: known-issues.md
  - file: breaking-changes.md
  - file: deprecations.md
python-elasticsearch-9.1.1/docs/sphinx/000077500000000000000000000000001506101734100201125ustar00rootroot00000000000000
python-elasticsearch-9.1.1/docs/sphinx/Makefile000066400000000000000000000152061506101734100215560ustar00rootroot00000000000000
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Elasticsearch.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Elasticsearch.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Elasticsearch"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Elasticsearch"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
python-elasticsearch-9.1.1/docs/sphinx/_static/000077500000000000000000000000001506101734100215405ustar00rootroot00000000000000
python-elasticsearch-9.1.1/docs/sphinx/_static/css/000077500000000000000000000000001506101734100223305ustar00rootroot00000000000000
python-elasticsearch-9.1.1/docs/sphinx/_static/css/custom.css000066400000000000000000000004471506101734100243610ustar00rootroot00000000000000
/* Display GitHub Flavored Markdown code blocks correctly */
.rst-content pre {
  background-color: #f5f5f5;
  border-radius: 6px;
  padding: 16px;
  margin: 16px 0;
  overflow-x: auto;
}

.rst-content pre code {
  background-color: #f5f5f5;
  white-space: pre;
  border: none;
  padding: 0;
}
python-elasticsearch-9.1.1/docs/sphinx/_static/images/000077500000000000000000000000001506101734100230055ustar00rootroot00000000000000
python-elasticsearch-9.1.1/docs/sphinx/_static/images/create-api-key.png000066400000000000000000002352741506101734100263300ustar00rootroot00000000000000
[binary PNG image data omitted; truncated at end of archive]
toctree:: :maxdepth: 1 api/elasticsearch api/async-search api/autoscaling api/cat api/ccr api/cluster api/connector api/dangling-indices api/enrich-policies api/eql api/esql api/fleet api/graph-explore api/index-lifecycle-management api/indices api/inference api/ingest-pipelines api/license api/logstash api/migration api/ml api/monitoring api/nodes api/query-rules api/rollup-indices api/search-application api/searchable-snapshots api/security api/shutdown api/simulate api/snapshot-lifecycle-management api/snapshots api/snapshottable-features api/sql api/synonyms api/tls-ssl api/tasks api/text-structure api/transforms api/watcher api/x-pack python-elasticsearch-9.1.1/docs/sphinx/esql.rst000066400000000000000000000041141506101734100216100ustar00rootroot00000000000000ES|QL Query Builder =================== Commands -------- .. autoclass:: elasticsearch.esql.ESQL :inherited-members: :members: .. autoclass:: elasticsearch.esql.esql.ESQLBase :inherited-members: :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.From :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Row :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Show :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.ChangePoint :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Completion :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Dissect :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Drop :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Enrich :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Eval :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Fork :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Grok :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Keep :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Limit :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.LookupJoin :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.MvExpand :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Rename :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Sample :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Sort :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Stats :members: :exclude-members: __init__ .. autoclass:: elasticsearch.esql.esql.Where :members: :exclude-members: __init__ Functions --------- .. automodule:: elasticsearch.esql.functions :members: python-elasticsearch-9.1.1/docs/sphinx/exceptions.rst000066400000000000000000000014271506101734100230310ustar00rootroot00000000000000.. _exceptions: Exceptions & Warnings ===================== .. py:module:: elasticsearch :no-index: API Errors ---------- These errors are triggered from an HTTP response that isn't 2XX: .. autoclass:: ApiError :members: .. autoclass:: NotFoundError .. autoclass:: ConflictError .. autoclass:: RequestError .. autoclass:: AuthenticationException .. autoclass:: AuthorizationException .. autoclass:: UnsupportedProductError Transport and Connection Errors ------------------------------- These errors are triggered by an error occurring before an HTTP response arrives: .. autoclass:: TransportError .. autoclass:: SerializationError .. autoclass:: ConnectionError .. 
autoclass:: ConnectionTimeout .. autoclass:: SSLError Warnings -------- .. autoclass:: ElasticsearchWarning python-elasticsearch-9.1.1/docs/sphinx/index.rst000066400000000000000000000014571506101734100217620ustar00rootroot00000000000000Python Elasticsearch Client =========================== Welcome to the API documentation of the official Python client for Elasticsearch! The goal of this client is to provide common ground for all Elasticsearch-related code in Python; because of this it tries to be opinion-free and very extendable. High-level documentation for this client is `also available `_. .. toctree:: :maxdepth: 2 es_api esql dsl api_helpers exceptions .. toctree:: :caption: Async :maxdepth: 2 async_es_api async_dsl async_api_helpers License ------- Copyright 2023 Elasticsearch B.V. Licensed under the Apache License, Version 2.0. Indices and tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` python-elasticsearch-9.1.1/elasticsearch/000077500000000000000000000000001506101734100204635ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/__init__.py000066400000000000000000000064021506101734100225760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # flake8: noqa import logging import re import warnings from elastic_transport import __version__ as _elastic_transport_version from ._utils import fixup_module_metadata from ._version import __versionstr__ # Ensure that a compatible version of elastic-transport is installed. _version_groups = tuple(int(x) for x in re.search(r"^(\d+)\.(\d+)\.(\d+)", _elastic_transport_version).groups()) # type: ignore[union-attr] if _version_groups < (9, 1, 0) or _version_groups > (10, 0, 0): raise ImportError( "An incompatible version of elastic-transport is installed. Must be between " "v9.1.0 and v10.0.0. 
Install the correct version with the following command: " "$ python -m pip install 'elastic-transport>=9.1, <10'" ) _version_groups = re.search(r"^(\d+)\.(\d+)\.(\d+)", __versionstr__).groups() # type: ignore[assignment, union-attr] _major, _minor, _patch = (int(x) for x in _version_groups) VERSION = __version__ = (_major, _minor, _patch) logger = logging.getLogger("elasticsearch") logger.addHandler(logging.NullHandler()) from ._async.client import AsyncElasticsearch as AsyncElasticsearch from ._sync.client import Elasticsearch as Elasticsearch from .exceptions import ElasticsearchDeprecationWarning # noqa: F401 from .exceptions import ( ApiError, AuthenticationException, AuthorizationException, BadRequestError, ConflictError, ConnectionError, ConnectionTimeout, ElasticsearchWarning, NotFoundError, RequestError, SerializationError, SSLError, TransportError, UnsupportedProductError, ) from .serializer import JSONSerializer, JsonSerializer try: from .serializer import OrjsonSerializer except ImportError: OrjsonSerializer = None # type: ignore[assignment,misc] # Only raise one warning per deprecation message so as not # to spam up the user if the same action is done multiple times. warnings.simplefilter("default", category=ElasticsearchWarning, append=True) __all__ = [ "ApiError", "AsyncElasticsearch", "BadRequestError", "Elasticsearch", "JsonSerializer", "SerializationError", "TransportError", "NotFoundError", "ConflictError", "RequestError", "ConnectionError", "SSLError", "ConnectionTimeout", "AuthenticationException", "AuthorizationException", "UnsupportedProductError", "ElasticsearchWarning", ] if OrjsonSerializer is not None: __all__.append("OrjsonSerializer") fixup_module_metadata(__name__, globals()) del fixup_module_metadata python-elasticsearch-9.1.1/elasticsearch/_async/000077500000000000000000000000001506101734100217375ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/_async/__init__.py000066400000000000000000000014231506101734100240500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/elasticsearch/_async/client/000077500000000000000000000000001506101734100232155ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/_async/client/__init__.py000066400000000000000000012311121506101734100253270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import typing as t from elastic_transport import ( AsyncTransport, BaseNode, BinaryApiResponse, HeadApiResponse, NodeConfig, NodePool, NodeSelector, ObjectApiResponse, Serializer, ) from elastic_transport.client_utils import DEFAULT, DefaultType from ...exceptions import ApiError, TransportError from ...serializer import DEFAULT_SERIALIZERS from ._base import ( BaseClient, create_sniff_callback, default_sniff_callback, resolve_auth_headers, ) from .async_search import AsyncSearchClient from .autoscaling import AutoscalingClient from .cat import CatClient from .ccr import CcrClient from .cluster import ClusterClient from .connector import ConnectorClient from .dangling_indices import DanglingIndicesClient from .enrich import EnrichClient from .eql import EqlClient from .esql import EsqlClient from .features import FeaturesClient from .fleet import FleetClient from .graph import GraphClient from .ilm import IlmClient from .indices import IndicesClient from .inference import InferenceClient from .ingest import IngestClient from .license import LicenseClient from .logstash import LogstashClient from .migration import MigrationClient from .ml import MlClient from .monitoring import MonitoringClient from .nodes import NodesClient from .query_rules import QueryRulesClient from .rollup import RollupClient from .search_application import SearchApplicationClient from .searchable_snapshots import SearchableSnapshotsClient from .security import SecurityClient from .shutdown import ShutdownClient from .simulate import SimulateClient from .slm import SlmClient from .snapshot import SnapshotClient from .sql import SqlClient from .ssl import SslClient from .synonyms import SynonymsClient from .tasks import TasksClient from .text_structure import TextStructureClient from .transform import TransformClient from .utils import ( _TYPE_HOSTS, CLIENT_META_SERVICE, SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, client_node_configs, is_requests_http_auth, is_requests_node_class, ) from .watcher import WatcherClient from .xpack import XPackClient logger = logging.getLogger("elasticsearch") SelfType = t.TypeVar("SelfType", bound="AsyncElasticsearch") class AsyncElasticsearch(BaseClient): """ Elasticsearch low-level client. Provides a straightforward mapping from Python to Elasticsearch REST APIs. The client instance has additional attributes to update APIs in different namespaces such as ``async_search``, ``indices``, ``security``, and more: .. code-block:: python client = Elasticsearch("http://localhost:9200") # Get Document API client.get(index="*", id="1") # Get Index API client.indices.get(index="*") Transport options can be set on the client constructor or using the :meth:`~elasticsearch.Elasticsearch.options` method: .. code-block:: python # Set 'api_key' on the constructor client = Elasticsearch( "http://localhost:9200", api_key="api_key", ) client.search(...) # Set 'api_key' per request client.options(api_key="api_key").search(...) 
""" def __init__( self, hosts: t.Optional[_TYPE_HOSTS] = None, *, # API cloud_id: t.Optional[str] = None, api_key: t.Optional[t.Union[str, t.Tuple[str, str]]] = None, basic_auth: t.Optional[t.Union[str, t.Tuple[str, str]]] = None, bearer_auth: t.Optional[str] = None, opaque_id: t.Optional[str] = None, # Node headers: t.Union[DefaultType, t.Mapping[str, str]] = DEFAULT, connections_per_node: t.Union[DefaultType, int] = DEFAULT, http_compress: t.Union[DefaultType, bool] = DEFAULT, verify_certs: t.Union[DefaultType, bool] = DEFAULT, ca_certs: t.Union[DefaultType, str] = DEFAULT, client_cert: t.Union[DefaultType, str] = DEFAULT, client_key: t.Union[DefaultType, str] = DEFAULT, ssl_assert_hostname: t.Union[DefaultType, str] = DEFAULT, ssl_assert_fingerprint: t.Union[DefaultType, str] = DEFAULT, ssl_version: t.Union[DefaultType, int] = DEFAULT, ssl_context: t.Union[DefaultType, t.Any] = DEFAULT, ssl_show_warn: t.Union[DefaultType, bool] = DEFAULT, # Transport transport_class: t.Type[AsyncTransport] = AsyncTransport, request_timeout: t.Union[DefaultType, None, float] = DEFAULT, node_class: t.Union[DefaultType, t.Type[BaseNode]] = DEFAULT, node_pool_class: t.Union[DefaultType, t.Type[NodePool]] = DEFAULT, randomize_nodes_in_pool: t.Union[DefaultType, bool] = DEFAULT, node_selector_class: t.Union[DefaultType, t.Type[NodeSelector]] = DEFAULT, dead_node_backoff_factor: t.Union[DefaultType, float] = DEFAULT, max_dead_node_backoff: t.Union[DefaultType, float] = DEFAULT, serializer: t.Optional[Serializer] = None, serializers: t.Union[DefaultType, t.Mapping[str, Serializer]] = DEFAULT, default_mimetype: str = "application/json", max_retries: t.Union[DefaultType, int] = DEFAULT, retry_on_status: t.Union[DefaultType, int, t.Collection[int]] = DEFAULT, retry_on_timeout: t.Union[DefaultType, bool] = DEFAULT, sniff_on_start: t.Union[DefaultType, bool] = DEFAULT, sniff_before_requests: t.Union[DefaultType, bool] = DEFAULT, sniff_on_node_failure: t.Union[DefaultType, bool] = DEFAULT, sniff_timeout: t.Union[DefaultType, None, float] = DEFAULT, min_delay_between_sniffing: t.Union[DefaultType, None, float] = DEFAULT, sniffed_node_callback: t.Optional[ t.Callable[[t.Dict[str, t.Any], NodeConfig], t.Optional[NodeConfig]] ] = None, meta_header: t.Union[DefaultType, bool] = DEFAULT, http_auth: t.Union[DefaultType, t.Any] = DEFAULT, # Internal use only _transport: t.Optional[AsyncTransport] = None, ) -> None: if hosts is None and cloud_id is None and _transport is None: raise ValueError("Either 'hosts' or 'cloud_id' must be specified") if serializer is not None: if serializers is not DEFAULT: raise ValueError( "Can't specify both 'serializer' and 'serializers' parameters " "together. Instead only specify one of the other." 
) serializers = {default_mimetype: serializer} # Setting min_delay_between_sniffing=True implies sniff_before_requests=True if min_delay_between_sniffing is not DEFAULT: sniff_before_requests = True sniffing_options = ( sniff_timeout, sniff_on_start, sniff_before_requests, sniff_on_node_failure, sniffed_node_callback, min_delay_between_sniffing, sniffed_node_callback, ) if cloud_id is not None and any( x is not DEFAULT and x is not None for x in sniffing_options ): raise ValueError( "Sniffing should not be enabled when connecting to Elastic Cloud" ) sniff_callback = None if sniffed_node_callback is not None: sniff_callback = create_sniff_callback( sniffed_node_callback=sniffed_node_callback ) elif ( sniff_on_start is True or sniff_before_requests is True or sniff_on_node_failure is True ): sniff_callback = default_sniff_callback if _transport is None: requests_session_auth = None if http_auth is not None and http_auth is not DEFAULT: if is_requests_http_auth(http_auth): # If we're using custom requests authentication # then we need to alert the user that they also # need to use 'node_class=requests'. if not is_requests_node_class(node_class): raise ValueError( "Using a custom 'requests.auth.AuthBase' class for " "'http_auth' must be used with node_class='requests'" ) # Reset 'http_auth' to DEFAULT so it's not consumed below. requests_session_auth = http_auth http_auth = DEFAULT node_configs = client_node_configs( hosts, cloud_id=cloud_id, requests_session_auth=requests_session_auth, connections_per_node=connections_per_node, http_compress=http_compress, verify_certs=verify_certs, ca_certs=ca_certs, client_cert=client_cert, client_key=client_key, ssl_assert_hostname=ssl_assert_hostname, ssl_assert_fingerprint=ssl_assert_fingerprint, ssl_version=ssl_version, ssl_context=ssl_context, ssl_show_warn=ssl_show_warn, ) transport_kwargs: t.Dict[str, t.Any] = {} if node_class is not DEFAULT: transport_kwargs["node_class"] = node_class if node_pool_class is not DEFAULT: transport_kwargs["node_pool_class"] = node_pool_class if randomize_nodes_in_pool is not DEFAULT: transport_kwargs["randomize_nodes_in_pool"] = randomize_nodes_in_pool if node_selector_class is not DEFAULT: transport_kwargs["node_selector_class"] = node_selector_class if dead_node_backoff_factor is not DEFAULT: transport_kwargs["dead_node_backoff_factor"] = dead_node_backoff_factor if max_dead_node_backoff is not DEFAULT: transport_kwargs["max_dead_node_backoff"] = max_dead_node_backoff if meta_header is not DEFAULT: transport_kwargs["meta_header"] = meta_header transport_serializers = DEFAULT_SERIALIZERS.copy() if serializers is not DEFAULT: transport_serializers.update(serializers) # Override compatibility serializers from their non-compat mimetypes too. # So we use the same serializer for requests and responses. 
for mime_subtype in ("json", "x-ndjson"): if f"application/{mime_subtype}" in serializers: compat_mimetype = ( f"application/vnd.elasticsearch+{mime_subtype}" ) if compat_mimetype not in serializers: transport_serializers[compat_mimetype] = serializers[ f"application/{mime_subtype}" ] transport_kwargs["serializers"] = transport_serializers transport_kwargs["default_mimetype"] = default_mimetype if sniff_on_start is not DEFAULT: transport_kwargs["sniff_on_start"] = sniff_on_start if sniff_before_requests is not DEFAULT: transport_kwargs["sniff_before_requests"] = sniff_before_requests if sniff_on_node_failure is not DEFAULT: transport_kwargs["sniff_on_node_failure"] = sniff_on_node_failure if sniff_timeout is not DEFAULT: transport_kwargs["sniff_timeout"] = sniff_timeout if min_delay_between_sniffing is not DEFAULT: transport_kwargs["min_delay_between_sniffing"] = ( min_delay_between_sniffing ) _transport = transport_class( node_configs, client_meta_service=CLIENT_META_SERVICE, sniff_callback=sniff_callback, **transport_kwargs, ) super().__init__(_transport) # These are set per-request so are stored separately. self._request_timeout = request_timeout self._max_retries = max_retries self._retry_on_timeout = retry_on_timeout if isinstance(retry_on_status, int): retry_on_status = (retry_on_status,) self._retry_on_status = retry_on_status else: super().__init__(_transport) if headers is not DEFAULT and headers is not None: self._headers.update(headers) if opaque_id is not DEFAULT and opaque_id is not None: # type: ignore[comparison-overlap] self._headers["x-opaque-id"] = opaque_id self._headers = resolve_auth_headers( self._headers, http_auth=http_auth, api_key=api_key, basic_auth=basic_auth, bearer_auth=bearer_auth, ) # namespaced clients for compatibility with API names self.async_search = AsyncSearchClient(self) self.autoscaling = AutoscalingClient(self) self.cat = CatClient(self) self.cluster = ClusterClient(self) self.connector = ConnectorClient(self) self.fleet = FleetClient(self) self.features = FeaturesClient(self) self.indices = IndicesClient(self) self.inference = InferenceClient(self) self.ingest = IngestClient(self) self.nodes = NodesClient(self) self.snapshot = SnapshotClient(self) self.tasks = TasksClient(self) self.xpack = XPackClient(self) self.ccr = CcrClient(self) self.dangling_indices = DanglingIndicesClient(self) self.enrich = EnrichClient(self) self.eql = EqlClient(self) self.esql = EsqlClient(self) self.graph = GraphClient(self) self.ilm = IlmClient(self) self.license = LicenseClient(self) self.logstash = LogstashClient(self) self.migration = MigrationClient(self) self.ml = MlClient(self) self.monitoring = MonitoringClient(self) self.query_rules = QueryRulesClient(self) self.rollup = RollupClient(self) self.search_application = SearchApplicationClient(self) self.searchable_snapshots = SearchableSnapshotsClient(self) self.security = SecurityClient(self) self.slm = SlmClient(self) self.simulate = SimulateClient(self) self.shutdown = ShutdownClient(self) self.sql = SqlClient(self) self.ssl = SslClient(self) self.synonyms = SynonymsClient(self) self.text_structure = TextStructureClient(self) self.transform = TransformClient(self) self.watcher = WatcherClient(self) def __repr__(self) -> str: try: # get a list of all connections nodes = [node.base_url for node in self.transport.node_pool.all()] # truncate to 5 if there are too many if len(nodes) > 5: nodes = nodes[:5] + ["..."] return f"<{self.__class__.__name__}({nodes})>" except Exception: # probably operating on custom 
transport and connection_pool, ignore return super().__repr__() async def __aenter__(self) -> "AsyncElasticsearch": try: # All this to avoid a Mypy error when using unasync. await getattr(self.transport, "_async_call")() except AttributeError: pass return self async def __aexit__(self, *_: t.Any) -> None: await self.close() def options( self: SelfType, *, opaque_id: t.Union[DefaultType, str] = DEFAULT, api_key: t.Union[DefaultType, str, t.Tuple[str, str]] = DEFAULT, basic_auth: t.Union[DefaultType, str, t.Tuple[str, str]] = DEFAULT, bearer_auth: t.Union[DefaultType, str] = DEFAULT, headers: t.Union[DefaultType, t.Mapping[str, str]] = DEFAULT, request_timeout: t.Union[DefaultType, t.Optional[float]] = DEFAULT, ignore_status: t.Union[DefaultType, int, t.Collection[int]] = DEFAULT, max_retries: t.Union[DefaultType, int] = DEFAULT, retry_on_status: t.Union[DefaultType, int, t.Collection[int]] = DEFAULT, retry_on_timeout: t.Union[DefaultType, bool] = DEFAULT, ) -> SelfType: client = type(self)(_transport=self.transport) resolved_headers = headers if headers is not DEFAULT else None resolved_headers = resolve_auth_headers( headers=resolved_headers, api_key=api_key, basic_auth=basic_auth, bearer_auth=bearer_auth, ) resolved_opaque_id = opaque_id if opaque_id is not DEFAULT else None if resolved_opaque_id: resolved_headers["x-opaque-id"] = resolved_opaque_id if resolved_headers: new_headers = self._headers.copy() new_headers.update(resolved_headers) client._headers = new_headers else: client._headers = self._headers.copy() if request_timeout is not DEFAULT: client._request_timeout = request_timeout else: client._request_timeout = self._request_timeout if ignore_status is not DEFAULT: if isinstance(ignore_status, int): ignore_status = (ignore_status,) client._ignore_status = ignore_status else: client._ignore_status = self._ignore_status if max_retries is not DEFAULT: if not isinstance(max_retries, int): raise TypeError("'max_retries' must be of type 'int'") client._max_retries = max_retries else: client._max_retries = self._max_retries if retry_on_status is not DEFAULT: if isinstance(retry_on_status, int): retry_on_status = (retry_on_status,) client._retry_on_status = retry_on_status else: client._retry_on_status = self._retry_on_status if retry_on_timeout is not DEFAULT: if not isinstance(retry_on_timeout, bool): raise TypeError("'retry_on_timeout' must be of type 'bool'") client._retry_on_timeout = retry_on_timeout else: client._retry_on_timeout = self._retry_on_timeout return client async def close(self) -> None: """Closes the Transport and all internal connections""" await self.transport.close() @_rewrite_parameters() async def ping( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[t.List[str], str]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> bool: """ Returns True if a successful response returns from the info() API, otherwise returns False. This API call can fail either at the transport layer (due to connection errors or timeouts) or from a non-2XX HTTP response (due to authentication or authorization issues). If you want to discover why the request failed you should use the ``info()`` API. 
``_ """ __path = "/" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} try: await self.perform_request( "HEAD", __path, params=__query, headers=__headers ) return True except (ApiError, TransportError): return False # AUTO-GENERATED-API-DEFINITIONS # @_rewrite_parameters( body_name="operations", parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) async def bulk( self, *, operations: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_source_on_error: t.Optional[bool] = None, list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk index or delete documents. Perform multiple index, create, delete, and update actions in a single request. This reduces overhead and can greatly increase indexing speed.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

  • To use the create action, you must have the create_doc, create, index, or write index privilege. Data streams support only the create action.
  • To use the index action, you must have the create, index, or write index privilege.
  • To use the delete action, you must have the delete or write index privilege.
  • To use the update action, you must have the index or write index privilege.
  • To automatically create a data stream or index with a bulk API request, you must have the auto_configure, create_index, or manage index privilege.
  • To make the result of a bulk operation visible to search using the refresh parameter, you must have the maintenance or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:

action_and_meta_data\\n
          optional_source\\n
          action_and_meta_data\\n
          optional_source\\n
          ....
          action_and_meta_data\\n
          optional_source\\n
          

The index and create actions expect a source on the next line and have the same semantics as the op_type parameter in the standard index API. A create action fails if a document with the same ID already exists in the target; an index action adds or replaces a document as necessary.

NOTE: Data streams support only the create action. To update or delete a document in a data stream, you must target the backing index containing the document.

An update action expects that the partial doc, upsert, and script and its options are specified on the next line.

A delete action does not expect a source on the next line and has the same semantics as the standard delete API.

NOTE: The final line of data must end with a newline character (\\n). Each newline character may be preceded by a carriage return (\\r). When sending NDJSON data to the _bulk endpoint, use a Content-Type header of application/json or application/x-ndjson. Because this format uses literal newline characters (\\n) as delimiters, make sure that the JSON actions and sources are not pretty printed.
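
Putting these rules together, a complete request body could look like this (a hedged sketch; the index name, document ID values, and fields are illustrative):

          { "index" : { "_index" : "my-index", "_id" : "1" } }
          { "title" : "Hello" }
          { "delete" : { "_index" : "my-index", "_id" : "2" } }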

If you provide a target in the request path, it is used for any actions that don't explicitly specify an _index argument.

A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only action_meta_data is parsed on the receiving node side.

Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.

There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.

Client support for bulk requests

Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:

  • Go: Check out esutil.BulkIndexer
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and Search::Elasticsearch::Client::5_0::Scroll
  • Python: Check out elasticsearch.helpers.* (a short usage sketch follows this list)
  • JavaScript: Check out client.helpers.*
  • .NET: Check out BulkAllObservable
  • PHP: Check out bulk indexing.
  • Ruby: Check out Elasticsearch::Helpers::BulkHelper
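
As a rough illustration of the Python helpers (a hedged sketch assuming a reachable cluster at localhost:9200; the index name and generated documents are illustrative):

          from elasticsearch import Elasticsearch, helpers

          client = Elasticsearch("http://localhost:9200")

          # Each action dict uses the bulk metadata keys ('_index', '_id', '_source').
          actions = (
              {"_index": "my-index", "_id": str(i), "_source": {"value": i}}
              for i in range(100)
          )

          # helpers.bulk() chunks the actions and streams them through the bulk API,
          # returning a count of successful actions plus details of any errors.
          successes, errors = helpers.bulk(client, actions)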

Submitting bulk requests with cURL

If you're providing text file input to curl, you must use the --data-binary flag instead of plain -d. The latter doesn't preserve newlines. For example:

$ cat requests
          { "index" : { "_index" : "test", "_id" : "1" } }
          { "field1" : "value1" }
          $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
          {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
          

Optimistic concurrency control

Each index and delete action within a bulk API call may include the if_seq_no and if_primary_term parameters in their respective action and meta data lines. The if_seq_no and if_primary_term parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
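
For example, an index action that only applies if the document has not been modified since sequence number 3 in primary term 1 might look like this (a hedged sketch; the values are illustrative):

          { "index" : { "_index" : "my-index", "_id" : "1", "if_seq_no" : 3, "if_primary_term" : 1 } }
          { "field1" : "value1" }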

Versioning

Each bulk item can include the version value using the version field. It automatically follows the behavior of the index or delete operation based on the _version mapping. It also supports the version_type.
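
For example, an externally versioned index action might look like this (a hedged sketch; the version number is illustrative):

          { "index" : { "_index" : "my-index", "_id" : "1", "version" : 5, "version_type" : "external" } }
          { "field1" : "value1" }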

Routing

Each bulk item can include the routing value using the routing field. It automatically follows the behavior of the index or delete operation based on the _routing mapping.

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.
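
For example, a routed index action might look like this (a hedged sketch; the routing value is illustrative):

          { "index" : { "_index" : "my-index", "_id" : "1", "routing" : "user-1" } }
          { "field1" : "value1" }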

Wait for active shards

When making bulk calls, you can set the wait_for_active_shards parameter to require a minimum number of shard copies to be active before starting to process the bulk request.
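
For instance, requiring two active copies of each shard before processing might look like this (a hedged sketch building on the cURL example above):

          $ curl -s -H "Content-Type: application/x-ndjson" -XPOST "localhost:9200/_bulk?wait_for_active_shards=2" --data-binary "@requests"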

Refresh

Control when the changes made by this request are visible to search.

NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a _bulk?refresh=wait_for request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the _bulk request at all.

You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests. Refer to the linked documentation for step-by-step instructions using the index settings API.
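
For example, the refresh interval could be disabled before a large bulk load and restored afterwards via the index settings API (a hedged sketch; my-index is illustrative, and null restores the default interval):

          $ curl -s -H "Content-Type: application/json" -XPUT "localhost:9200/my-index/_settings" -d '{"index": {"refresh_interval": "-1"}}'
          $ curl -s -H "Content-Type: application/json" -XPUT "localhost:9200/my-index/_settings" -d '{"index": {"refresh_interval": null}}'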

        ``_

        :param operations:
        :param index: The name of the data stream, index, or index alias to
            perform bulk actions on.
        :param include_source_on_error: Whether to include the document source
            in the error message in case of parsing errors.
        :param list_executed_pipelines: If `true`, the response will include the
            ingest pipelines that were run for each index or create.
        :param pipeline: The pipeline identifier to use to preprocess incoming
            documents. If the index has a default ingest pipeline specified,
            setting the value to `_none` turns off the default ingest pipeline
            for this request. If a final pipeline is configured, it will always
            run regardless of the value of this parameter.
        :param refresh: If `true`, Elasticsearch refreshes the affected shards
            to make this operation visible to search. If `wait_for`, wait for a
            refresh to make this operation visible to search. If `false`, do
            nothing with refreshes. Valid values: `true`, `false`, `wait_for`.
        :param require_alias: If `true`, the request's actions must target an
            index alias.
        :param require_data_stream: If `true`, the request's actions must target
            a data stream (existing or to be created).
        :param routing: A custom value that is used to route operations to a
            specific shard.
        :param source: Indicates whether to return the `_source` field (`true`
            or `false`) or contains a list of fields to return.
        :param source_excludes: A comma-separated list of source fields to
            exclude from the response. You can also use this parameter to
            exclude fields from the subset specified in `_source_includes`
            query parameter. If the `_source` parameter is `false`, this
            parameter is ignored.
        :param source_includes: A comma-separated list of source fields to
            include in the response. If this parameter is specified, only these
            source fields are returned. You can exclude fields from this subset
            using the `_source_excludes` query parameter. If the `_source`
            parameter is `false`, this parameter is ignored.
        :param timeout: The period each action waits for the following
            operations: automatic index creation, dynamic mapping updates, and
            waiting for active shards. The default is `1m` (one minute), which
            guarantees Elasticsearch waits for at least the timeout before
            failing. The actual wait time could be longer, particularly when
            multiple waits occur.
        :param wait_for_active_shards: The number of shard copies that must be
            active before proceeding with the operation. Set to `all` or any
            positive integer up to the total number of shards in the index
            (`number_of_replicas+1`). The default is `1`, which waits for each
            primary shard to be active.
        """
        if operations is None and body is None:
            raise ValueError(
                "Empty value passed for parameters 'operations' and 'body', one of them should be set."
            )
        elif operations is not None and body is not None:
            raise ValueError("Cannot set both 'operations' and 'body'")
        __path_parts: t.Dict[str, str]
        if index not in SKIP_IN_PATH:
            __path_parts = {"index": _quote(index)}
            __path = f'/{__path_parts["index"]}/_bulk'
        else:
            __path_parts = {}
            __path = "/_bulk"
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error
        if list_executed_pipelines is not None: __query["list_executed_pipelines"] = list_executed_pipelines
        if pipeline is not None: __query["pipeline"] = pipeline
        if pretty is not None: __query["pretty"] = pretty
        if refresh is not None: __query["refresh"] = refresh
        if require_alias is not None: __query["require_alias"] = require_alias
        if require_data_stream is not None: __query["require_data_stream"] = require_data_stream
        if routing is not None: __query["routing"] = routing
        if source is not None: __query["_source"] = source
        if source_excludes is not None: __query["_source_excludes"] = source_excludes
        if source_includes is not None: __query["_source_includes"] = source_includes
        if timeout is not None: __query["timeout"] = timeout
        if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards
        __body = operations if operations is not None else body
        __headers = {
            "accept": "application/json",
            "content-type": "application/x-ndjson",
        }
        return await self.perform_request(  # type: ignore[return-value]
            "PUT", __path, params=__query, headers=__headers, body=__body,
            endpoint_id="bulk", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=("scroll_id",),
    )
    async def clear_scroll(
        self,
        *,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        pretty: t.Optional[bool] = None,
        scroll_id: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Clear a scrolling search. Clear the search context and results for a scrolling search.
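
A sketch of the usual pattern, assuming `es` is an already-connected AsyncElasticsearch instance and the index name is a placeholder:

  resp = await es.search(index="my-index", scroll="1m", query={"match_all": {}})
  scroll_id = resp["_scroll_id"]
  # ... page through the results with es.scroll(...) ...
  await es.clear_scroll(scroll_id=scroll_id)  # release the search context early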

``_

:param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`.
        """
        __path_parts: t.Dict[str, str] = {}
        __path = "/_search/scroll"
        __query: t.Dict[str, t.Any] = {}
        __body: t.Dict[str, t.Any] = body if body is not None else {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if pretty is not None: __query["pretty"] = pretty
        if not __body:
            if scroll_id is not None: __body["scroll_id"] = scroll_id
        if not __body:
            __body = None  # type: ignore[assignment]
        __headers = {"accept": "application/json"}
        if __body is not None: __headers["content-type"] = "application/json"
        return await self.perform_request(  # type: ignore[return-value]
            "DELETE", __path, params=__query, headers=__headers, body=__body,
            endpoint_id="clear_scroll", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=("id",),
    )
    async def close_point_in_time(
        self,
        *,
        id: t.Optional[str] = None,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        pretty: t.Optional[bool] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Close a point in time. A point in time must be opened explicitly before being used in search requests. The keep_alive parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the keep_alive period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.
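
A sketch, assuming `es` is an already-connected AsyncElasticsearch instance and the index name is a placeholder:

  pit = await es.open_point_in_time(index="my-index", keep_alive="1m")
  # ... run searches that reference pit["id"] ...
  await es.close_point_in_time(id=pit["id"])  # release it once you are done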

``_

:param id: The ID of the point-in-time.
        """
        if id is None and body is None:
            raise ValueError("Empty value passed for parameter 'id'")
        __path_parts: t.Dict[str, str] = {}
        __path = "/_pit"
        __query: t.Dict[str, t.Any] = {}
        __body: t.Dict[str, t.Any] = body if body is not None else {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if pretty is not None: __query["pretty"] = pretty
        if not __body:
            if id is not None: __body["id"] = id
        if not __body:
            __body = None  # type: ignore[assignment]
        __headers = {"accept": "application/json"}
        if __body is not None: __headers["content-type"] = "application/json"
        return await self.perform_request(  # type: ignore[return-value]
            "DELETE", __path, params=__query, headers=__headers, body=__body,
            endpoint_id="close_point_in_time", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=("query",),
    )
    async def count(
        self,
        *,
        index: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        allow_no_indices: t.Optional[bool] = None,
        analyze_wildcard: t.Optional[bool] = None,
        analyzer: t.Optional[str] = None,
        default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None,
        df: t.Optional[str] = None,
        error_trace: t.Optional[bool] = None,
        expand_wildcards: t.Optional[
            t.Union[
                t.Sequence[t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]],
                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
            ]
        ] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        ignore_throttled: t.Optional[bool] = None,
        ignore_unavailable: t.Optional[bool] = None,
        lenient: t.Optional[bool] = None,
        min_score: t.Optional[float] = None,
        preference: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        q: t.Optional[str] = None,
        query: t.Optional[t.Mapping[str, t.Any]] = None,
        routing: t.Optional[str] = None,
        terminate_after: t.Optional[int] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Count search results. Get the number of documents matching a query.

The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses match_all to count all the documents.

The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.

The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count.
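
For example, a minimal sketch with this client, assuming `es` is an AsyncElasticsearch instance (index and query are placeholders):

  resp = await es.count(index="my-index", query={"term": {"user.id": "kimchy"}})
  print(resp["count"])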

``_

:param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`.
:param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
:param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
:param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
:param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
:param df: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
:param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`.
:param ignore_throttled: If `true`, concrete, expanded, or aliased indices are ignored when frozen.
:param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index.
:param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
:param min_score: The minimum `_score` value that documents must have to be included in the result.
:param preference: The node or shard the operation should be performed on. By default, it is random.
:param q: The query in Lucene query string syntax. This parameter cannot be used with a request body.
:param query: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter.
:param routing: A custom value used to route operations to a specific shard.
:param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
        """
        __path_parts: t.Dict[str, str]
        if index not in SKIP_IN_PATH:
            __path_parts = {"index": _quote(index)}
            __path = f'/{__path_parts["index"]}/_count'
        else:
            __path_parts = {}
            __path = "/_count"
        __query: t.Dict[str, t.Any] = {}
        __body: t.Dict[str, t.Any] = body if body is not None else {}
        if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices
        if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard
        if analyzer is not None: __query["analyzer"] = analyzer
        if default_operator is not None: __query["default_operator"] = default_operator
        if df is not None: __query["df"] = df
        if error_trace is not None: __query["error_trace"] = error_trace
        if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled
        if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable
        if lenient is not None: __query["lenient"] = lenient
        if min_score is not None: __query["min_score"] = min_score
        if preference is not None: __query["preference"] = preference
        if pretty is not None: __query["pretty"] = pretty
        if q is not None: __query["q"] = q
        if routing is not None: __query["routing"] = routing
        if terminate_after is not None: __query["terminate_after"] = terminate_after
        if not __body:
            if query is not None: __body["query"] = query
        if not __body:
            __body = None  # type: ignore[assignment]
        __headers = {"accept": "application/json"}
        if __body is not None: __headers["content-type"] = "application/json"
        return await self.perform_request(  # type: ignore[return-value]
            "POST", __path, params=__query, headers=__headers, body=__body,
            endpoint_id="count", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_name="document",
    )
    async def create(
        self,
        *,
        index: str,
        id: str,
        document: t.Optional[t.Mapping[str, t.Any]] = None,
        body: t.Optional[t.Mapping[str, t.Any]] = None,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        include_source_on_error: t.Optional[bool] = None,
        pipeline: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        refresh: t.Optional[t.Union[bool, str, t.Literal["false", "true", "wait_for"]]] = None,
        require_alias: t.Optional[bool] = None,
        require_data_stream: t.Optional[bool] = None,
        routing: t.Optional[str] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        version: t.Optional[int] = None,
        version_type: t.Optional[t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]] = None,
        wait_for_active_shards: t.Optional[t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Create a new document in the index.

You can index a new JSON document with the /<target>/_doc/ or /<target>/_create/<_id> APIs. Using _create guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the /<target>/_doc/ API.
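
A sketch of that guarantee with this client, assuming `es` is an AsyncElasticsearch instance (index, ID, and document are placeholders):

  from elasticsearch import ConflictError

  try:
      await es.create(index="my-index", id="1", document={"title": "first"})
  except ConflictError:
      # a document with this ID already exists (HTTP 409); updating it
      # would require the /<target>/_doc/ API (es.index or es.update)
      ...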

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

  • To add a document using the PUT /<target>/_create/<_id> or POST /<target>/_create/<_id> request formats, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

Automatically create data streams and indices

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.

Automatic index creation is controlled by the action.auto_create_index setting. If it is true, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to false to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow or prefix each pattern with + or - to indicate whether it should be allowed or blocked. When a list is specified, the default behaviour is to disallow.

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. It does not affect the creation of data streams.

Routing

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

Distributed

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

Active shards

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. To alter this behavior per operation, use the wait_for_active_shards request parameter.

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.

For example, suppose you have a cluster of three nodes, A, B, and C and you create an index index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will timeout unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.
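
As a sketch, the three-copy requirement from the example above could be expressed like this with the client (names and values are placeholders):

  await es.create(
      index="my-index",
      id="1",
      document={"title": "first"},
      wait_for_active_shards=3,  # require three active copies before indexing
      timeout="30s",  # how long to wait for those copies to become active
  )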

``_

:param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index.
:param id: A unique identifier for the document. To automatically generate a document ID, use the `POST /<target>/_doc/` request format.
:param document:
:param include_source_on_error: If `true`, the document source is included in the error message in case of parsing errors.
:param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter.
:param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
:param require_alias: If `true`, the destination must be an index alias.
:param require_data_stream: If `true`, the request's actions must target a data stream (existing or to be created).
:param routing: A custom value that is used to route operations to a specific shard.
:param timeout: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error.
:param version: The explicit version number for concurrency control. It must be a non-negative long number.
:param version_type: The version type.
:param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'id'")
        if document is None and body is None:
            raise ValueError(
                "Empty value passed for parameters 'document' and 'body', one of them should be set."
            )
        elif document is not None and body is not None:
            raise ValueError("Cannot set both 'document' and 'body'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)}
        __path = f'/{__path_parts["index"]}/_create/{__path_parts["id"]}'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error
        if pipeline is not None: __query["pipeline"] = pipeline
        if pretty is not None: __query["pretty"] = pretty
        if refresh is not None: __query["refresh"] = refresh
        if require_alias is not None: __query["require_alias"] = require_alias
        if require_data_stream is not None: __query["require_data_stream"] = require_data_stream
        if routing is not None: __query["routing"] = routing
        if timeout is not None: __query["timeout"] = timeout
        if version is not None: __query["version"] = version
        if version_type is not None: __query["version_type"] = version_type
        if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards
        __body = document if document is not None else body
        __headers = {"accept": "application/json", "content-type": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "PUT", __path, params=__query, headers=__headers, body=__body,
            endpoint_id="create", path_parts=__path_parts,
        )

    @_rewrite_parameters()
    async def delete(
        self,
        *,
        index: str,
        id: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        if_primary_term: t.Optional[int] = None,
        if_seq_no: t.Optional[int] = None,
        pretty: t.Optional[bool] = None,
        refresh: t.Optional[t.Union[bool, str, t.Literal["false", "true", "wait_for"]]] = None,
        routing: t.Optional[str] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        version: t.Optional[int] = None,
        version_type: t.Optional[t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]] = None,
        wait_for_active_shards: t.Optional[t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Delete a document.

Remove a JSON document from the specified index.

NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document.

Optimistic concurrency control

Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.
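
A sketch with this client, where the sequence number and primary term would come from an earlier index or get response (the values here are illustrative):

  from elasticsearch import ConflictError

  try:
      await es.delete(index="my-index", id="1", if_seq_no=10, if_primary_term=1)
  except ConflictError:
      # another write modified the document first (HTTP 409)
      ...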

Versioning

Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the index.gc_deletes index setting.

Routing

If routing is used during indexing, the routing value also needs to be specified to delete a document.

If the _routing mapping is set to required and no routing value is specified, the delete API throws a RoutingMissingException and rejects the request.

For example:

DELETE /my-index-000001/_doc/1?routing=shard-1
          

This request deletes the document with ID 1, but it is routed based on the provided routing value (shard-1). The document is not deleted if the correct routing is not specified.
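
The equivalent call with this client might look like:

  await es.delete(index="my-index-000001", id="1", routing="shard-1")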

Distributed

The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.

``_

:param index: The name of the target index.
:param id: A unique identifier for the document.
:param if_primary_term: Only perform the operation if the document has this primary term.
:param if_seq_no: Only perform the operation if the document has this sequence number.
:param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes.
:param routing: A custom value used to route operations to a specific shard.
:param timeout: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error.
:param version: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed.
:param version_type: The version type.
:param wait_for_active_shards: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'id'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)}
        __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if if_primary_term is not None: __query["if_primary_term"] = if_primary_term
        if if_seq_no is not None: __query["if_seq_no"] = if_seq_no
        if pretty is not None: __query["pretty"] = pretty
        if refresh is not None: __query["refresh"] = refresh
        if routing is not None: __query["routing"] = routing
        if timeout is not None: __query["timeout"] = timeout
        if version is not None: __query["version"] = version
        if version_type is not None: __query["version_type"] = version_type
        if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards
        __headers = {"accept": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "DELETE", __path, params=__query, headers=__headers,
            endpoint_id="delete", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=("max_docs", "query", "slice", "sort"),
        parameter_aliases={"from": "from_"},
    )
    async def delete_by_query(
        self,
        *,
        index: t.Union[str, t.Sequence[str]],
        allow_no_indices: t.Optional[bool] = None,
        analyze_wildcard: t.Optional[bool] = None,
        analyzer: t.Optional[str] = None,
        conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None,
        default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None,
        df: t.Optional[str] = None,
        error_trace: t.Optional[bool] = None,
        expand_wildcards: t.Optional[
            t.Union[
                t.Sequence[t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]]],
                t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]],
            ]
        ] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        from_: t.Optional[int] = None,
        human: t.Optional[bool] = None,
        ignore_unavailable: t.Optional[bool] = None,
        lenient: t.Optional[bool] = None,
        max_docs: t.Optional[int] = None,
        preference: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        q: t.Optional[str] = None,
        query: t.Optional[t.Mapping[str, t.Any]] = None,
        refresh: t.Optional[bool] = None,
        request_cache: t.Optional[bool] = None,
        requests_per_second: t.Optional[float] = None,
        routing: t.Optional[str] = None,
        scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        scroll_size: t.Optional[int] = None,
        search_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        search_type: t.Optional[t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]]] = None,
        slice: t.Optional[t.Mapping[str, t.Any]] = None,
        slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None,
        sort: t.Optional[
            t.Union[
                t.Sequence[t.Union[str, t.Mapping[str, t.Any]]],
                t.Union[str, t.Mapping[str, t.Any]],
            ]
        ] = None,
        stats: t.Optional[t.Sequence[str]] = None,
        terminate_after: t.Optional[int] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        version: t.Optional[bool] = None,
        wait_for_active_shards: t.Optional[t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]] = None,
        wait_for_completion: t.Optional[bool] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Delete documents.

Deletes documents that match the specified query.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

  • read
  • delete or write

You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.

NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.

While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential back off. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back.

You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than max_docs until it has either successfully deleted max_docs documents or gone through every document in the source query.
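
A sketch of that option with this client, assuming `es` is an AsyncElasticsearch instance (index and query are placeholders):

  resp = await es.delete_by_query(
      index="my-index",
      conflicts="proceed",  # count version conflicts instead of aborting
      query={"match": {"status": "stale"}},
  )
  print(resp["deleted"], resp["version_conflicts"])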

Throttling delete requests

To control the rate at which delete by query issues batches of delete operations, you can set requests_per_second to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set requests_per_second to -1 to disable throttling.

Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. By default the batch size is 1000, so if requests_per_second is set to 500:

target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
          

Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
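
The 500-per-second example above might look like this with the client (index and query are placeholders):

  await es.delete_by_query(
      index="my-index",
      query={"match_all": {}},
      scroll_size=1000,  # the batch size (the default)
      requests_per_second=500,  # pads each 1000-document batch to about 2 seconds
  )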

Slicing

Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.

Setting slices to auto lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks:

  • You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains the status of completed slices.
  • These sub-requests are individually addressable for things like cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each sub-request.
  • Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being deleted.
  • Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.

If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:

  • Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
  • Delete performance scales linearly across available resources with the number of slices.

Whether query or delete performance dominates the runtime depends on the documents being deleted and cluster resources.
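
A sketch of automatic slicing combined with a task-based workflow (index and query are placeholders):

  task = await es.delete_by_query(
      index="my-index",
      query={"match_all": {}},
      slices="auto",  # let Elasticsearch pick one slice per shard, up to a limit
      wait_for_completion=False,  # return a task ID instead of blocking
  )
  print(task["task"])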

Cancel a delete by query operation

Any delete by query can be canceled using the task cancel API. For example:

POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
          

The task ID can be found by using the get tasks API.

Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.
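
With this client, assuming `task_id` came from a delete by query call made with `wait_for_completion=False`:

  await es.tasks.cancel(task_id=task_id)
  status = await es.tasks.get(task_id=task_id)  # poll until it terminates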

``_

:param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`.
:param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`.
:param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified.
:param analyzer: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified.
:param conflicts: What to do if delete by query hits version conflicts: `abort` or `proceed`.
:param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified.
:param df: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified.
:param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`.
:param from_: Skips the specified number of documents.
:param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index.
:param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified.
:param max_docs: The maximum number of documents to delete.
:param preference: The node or shard the operation should be performed on. It is random by default.
:param q: A query in the Lucene query string syntax.
:param query: The documents to delete specified with Query DSL.
:param refresh: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`.
:param request_cache: If `true`, the request cache is used for this request. Defaults to the index-level setting.
:param requests_per_second: The throttle for this request in sub-requests per second.
:param routing: A custom value used to route operations to a specific shard.
:param scroll: The period to retain the search context for scrolling.
:param scroll_size: The size of the scroll request that powers the operation.
:param search_timeout: The explicit timeout for each search request. It defaults to no timeout.
:param search_type: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`.
:param slice: Slice the request manually using the provided slice ID and total number of slices.
:param slices: The number of slices this task should be divided into.
:param sort: A sort object that specifies the order of deleted documents.
:param stats: The specific `tag` of the request for logging and statistical purposes.
:param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers.
:param timeout: The period each deletion request waits for active shards.
:param version: If `true`, returns the document version as part of a hit.
:param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available.
:param wait_for_completion: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
        __path = f'/{__path_parts["index"]}/_delete_by_query'
        __query: t.Dict[str, t.Any] = {}
        __body: t.Dict[str, t.Any] = body if body is not None else {}
        # The 'sort' parameter with a colon can't be encoded to the body.
        if sort is not None and (
            (isinstance(sort, str) and ":" in sort)
            or (
                isinstance(sort, (list, tuple))
                and all(isinstance(_x, str) for _x in sort)
                and any(":" in _x for _x in sort)
            )
        ):
            __query["sort"] = sort
            sort = None
        if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices
        if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard
        if analyzer is not None: __query["analyzer"] = analyzer
        if conflicts is not None: __query["conflicts"] = conflicts
        if default_operator is not None: __query["default_operator"] = default_operator
        if df is not None: __query["df"] = df
        if error_trace is not None: __query["error_trace"] = error_trace
        if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards
        if filter_path is not None: __query["filter_path"] = filter_path
        if from_ is not None: __query["from"] = from_
        if human is not None: __query["human"] = human
        if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable
        if lenient is not None: __query["lenient"] = lenient
        if preference is not None: __query["preference"] = preference
        if pretty is not None: __query["pretty"] = pretty
        if q is not None: __query["q"] = q
        if refresh is not None: __query["refresh"] = refresh
        if request_cache is not None: __query["request_cache"] = request_cache
        if requests_per_second is not None: __query["requests_per_second"] = requests_per_second
        if routing is not None: __query["routing"] = routing
        if scroll is not None: __query["scroll"] = scroll
        if scroll_size is not None: __query["scroll_size"] = scroll_size
        if search_timeout is not None: __query["search_timeout"] = search_timeout
        if search_type is not None: __query["search_type"] = search_type
        if slices is not None: __query["slices"] = slices
        if stats is not None: __query["stats"] = stats
        if terminate_after is not None: __query["terminate_after"] = terminate_after
        if timeout is not None: __query["timeout"] = timeout
        if version is not None: __query["version"] = version
        if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards
        if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion
        if not __body:
            if max_docs is not None: __body["max_docs"] = max_docs
            if query is not None: __body["query"] = query
            if slice is not None: __body["slice"] = slice
            if sort is not None: __body["sort"] = sort
        __headers = {"accept": "application/json", "content-type": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "POST", __path, params=__query, headers=__headers, body=__body,
            endpoint_id="delete_by_query", path_parts=__path_parts,
        )

    @_rewrite_parameters()
    async def delete_by_query_rethrottle(
        self,
        *,
        task_id: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        pretty: t.Optional[bool] = None,
        requests_per_second: t.Optional[float] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Throttle a delete by query operation.

Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
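
For example, a sketch that removes the throttle from a running operation (the task ID is illustrative):

  await es.delete_by_query_rethrottle(
      task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
      requests_per_second=-1,  # disable throttling for the remaining batches
  )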

``_

:param task_id: The ID for the task.
:param requests_per_second: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`.
        """
        if task_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'task_id'")
        __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)}
        __path = f'/_delete_by_query/{__path_parts["task_id"]}/_rethrottle'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if pretty is not None: __query["pretty"] = pretty
        if requests_per_second is not None: __query["requests_per_second"] = requests_per_second
        __headers = {"accept": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "POST", __path, params=__query, headers=__headers,
            endpoint_id="delete_by_query_rethrottle", path_parts=__path_parts,
        )

    @_rewrite_parameters()
    async def delete_script(
        self,
        *,
        id: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        pretty: t.Optional[bool] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Delete a script or search template. Deletes a stored script or search template.

``_

:param id: The identifier for the stored script or search template.
:param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
:param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
        """
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'id'")
        __path_parts: t.Dict[str, str] = {"id": _quote(id)}
        __path = f'/_scripts/{__path_parts["id"]}'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if master_timeout is not None: __query["master_timeout"] = master_timeout
        if pretty is not None: __query["pretty"] = pretty
        if timeout is not None: __query["timeout"] = timeout
        __headers = {"accept": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "DELETE", __path, params=__query, headers=__headers,
            endpoint_id="delete_script", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        parameter_aliases={
            "_source": "source",
            "_source_excludes": "source_excludes",
            "_source_includes": "source_includes",
        },
    )
    async def exists(
        self,
        *,
        index: str,
        id: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        preference: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        realtime: t.Optional[bool] = None,
        refresh: t.Optional[bool] = None,
        routing: t.Optional[str] = None,
        source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
        source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        version: t.Optional[int] = None,
        version_type: t.Optional[t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]] = None,
    ) -> HeadApiResponse:
        """
        .. raw:: html

Check a document.

Verify that a document exists. For example, check to see if a document with the _id 0 exists:

HEAD my-index-000001/_doc/0
          

If the document exists, the API returns a status code of 200 - OK. If the document doesn’t exist, the API returns 404 - Not Found.
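
With this client, the returned HeadApiResponse is truthy on 200 and falsy on 404, so the check can be written as:

  if await es.exists(index="my-index-000001", id="0"):
      print("document exists")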

Versioning support

You can use the version parameter to check the document only if its current version is equal to the specified one.

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.

``_

:param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`).
:param id: A unique document identifier.
:param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name.
:param realtime: If `true`, the request is real-time as opposed to near-real-time.
:param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
:param routing: A custom value used to route operations to a specific shard.
:param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
:param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
:param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored.
:param stored_fields: A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`.
:param version: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed.
:param version_type: The version type.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'id'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)}
        __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if preference is not None: __query["preference"] = preference
        if pretty is not None: __query["pretty"] = pretty
        if realtime is not None: __query["realtime"] = realtime
        if refresh is not None: __query["refresh"] = refresh
        if routing is not None: __query["routing"] = routing
        if source is not None: __query["_source"] = source
        if source_excludes is not None: __query["_source_excludes"] = source_excludes
        if source_includes is not None: __query["_source_includes"] = source_includes
        if stored_fields is not None: __query["stored_fields"] = stored_fields
        if version is not None: __query["version"] = version
        if version_type is not None: __query["version_type"] = version_type
        __headers = {"accept": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "HEAD", __path, params=__query, headers=__headers,
            endpoint_id="exists", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        parameter_aliases={
            "_source": "source",
            "_source_excludes": "source_excludes",
            "_source_includes": "source_includes",
        },
    )
    async def exists_source(
        self,
        *,
        index: str,
        id: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        preference: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        realtime: t.Optional[bool] = None,
        refresh: t.Optional[bool] = None,
        routing: t.Optional[str] = None,
        source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
        source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        version: t.Optional[int] = None,
        version_type: t.Optional[t.Union[str, t.Literal["external", "external_gte", "force", "internal"]]] = None,
    ) -> HeadApiResponse:
        """
        .. raw:: html

Check for a document source.

Check whether a document source exists in an index. For example:

HEAD my-index-000001/_source/1
          

A document's source is not available if it is disabled in the mapping.

``_

:param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`).
:param id: A unique identifier for the document.
:param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas.
:param realtime: If `true`, the request is real-time as opposed to near-real-time.
:param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing).
:param routing: A custom value used to route operations to a specific shard.
:param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return.
:param source_excludes: A comma-separated list of source fields to exclude in the response.
:param source_includes: A comma-separated list of source fields to include in the response.
:param version: The version number for concurrency control. It must match the current version of the document for the request to succeed.
:param version_type: The version type.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        if id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'id'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)}
        __path = f'/{__path_parts["index"]}/_source/{__path_parts["id"]}'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None: __query["error_trace"] = error_trace
        if filter_path is not None: __query["filter_path"] = filter_path
        if human is not None: __query["human"] = human
        if preference is not None: __query["preference"] = preference
        if pretty is not None: __query["pretty"] = pretty
        if realtime is not None: __query["realtime"] = realtime
        if refresh is not None: __query["refresh"] = refresh
        if routing is not None: __query["routing"] = routing
        if source is not None: __query["_source"] = source
        if source_excludes is not None: __query["_source_excludes"] = source_excludes
        if source_includes is not None: __query["_source_includes"] = source_includes
        if version is not None: __query["version"] = version
        if version_type is not None: __query["version_type"] = version_type
        __headers = {"accept": "application/json"}
        return await self.perform_request(  # type: ignore[return-value]
            "HEAD", __path, params=__query, headers=__headers,
            endpoint_id="exists_source", path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=("query",),
        parameter_aliases={
            "_source": "source",
            "_source_excludes": "source_excludes",
            "_source_includes": "source_includes",
        },
    )
    async def explain(
        self,
        *,
        index: str,
        id: str,
        analyze_wildcard: t.Optional[bool] = None,
        analyzer: t.Optional[str] = None,
        default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None,
        df: t.Optional[str] = None,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        lenient: t.Optional[bool] = None,
        preference: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        q: t.Optional[str] = None,
        query: t.Optional[t.Mapping[str, t.Any]] = None,
        routing: t.Optional[str] = None,
        source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None,
        source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document.
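For example, a minimal sketch with the async client (reusing the `client` from the earlier sketch; the `user.id` field and value come from the indexing example later in this module):

    # Sends POST /my-index-000001/_explain/1 with a Query DSL body.
    resp = await client.explain(
        index="my-index-000001",
        id="1",
        query={"match": {"user.id": "elkbee"}},
    )
    print(resp["matched"], resp["explanation"]["description"])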

``_ :param index: Index names that are used to limit the request. Only a single index name can be provided to this parameter. :param id: The document identifier. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param preference: The node or shard the operation should be performed on. It is random by default. :param q: The query in the Lucene query string syntax. :param query: Defines the search definition using the Query DSL. :param routing: A custom value used to route operations to a specific shard. :param source: `True` or `false` to return the `_source` field or not or a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: A comma-separated list of stored fields to return in the response. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_explain/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if lenient is not None: __query["lenient"] = lenient if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if not __body: if query is not None: __body["query"] = query if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="explain", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("fields", "index_filter", "runtime_mappings"), ) async def field_caps( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, filters: t.Optional[str] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_empty_fields: t.Optional[bool] = None, include_unmapped: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, types: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the field capabilities.

Get information about the capabilities of fields among multiple indices.

For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the keyword family.
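A minimal sketch (the index pattern and the `rating` field are illustrative):

    # Field capabilities for `rating` across all targets matching my-index-*.
    resp = await client.field_caps(index="my-index-*", fields="rating")
    print(resp["fields"])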

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fields: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. :param filters: A comma-separated list of filters to apply to the response. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param include_empty_fields: If false, empty fields are not included in the response. :param include_unmapped: If true, unmapped fields are included in the response. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. :param runtime_mappings: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. :param types: A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_field_caps' else: __path_parts = {} __path = "/_field_caps" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if filters is not None: __query["filters"] = filters if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_empty_fields is not None: __query["include_empty_fields"] = include_empty_fields if include_unmapped is not None: __query["include_unmapped"] = include_unmapped if pretty is not None: __query["pretty"] = pretty if types is not None: __query["types"] = types if not __body: if fields is not None: __body["fields"] = fields if index_filter is not None: __body["index_filter"] = index_filter if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="field_caps", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) async def get( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force_synthetic_source: t.Optional[bool] = None, human: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a document by its ID.

Get a document and its source or stored fields from an index.

By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the stored_fields parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the realtime parameter to false.

Source filtering

By default, the API returns the contents of the _source field unless you have used the stored_fields parameter or the _source field is turned off. You can turn off _source retrieval by using the _source parameter:

GET my-index-000001/_doc/0?_source=false
          

If you only need one or two fields from the _source, use the _source_includes or _source_excludes parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example:

GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
          

If you only want to specify includes, you can use a shorter notation:

GET my-index-000001/_doc/0?_source=*.id
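With this client, the same filtering is expressed through the `source`, `source_includes`, and `source_excludes` parameters; for example, a sketch of the includes/excludes request above:

    # Equivalent to GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
    doc = await client.get(
        index="my-index-000001",
        id="0",
        source_includes="*.id",
        source_excludes="entities",
    )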
          

Routing

If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example:

GET my-index-000001/_doc/2?routing=user1
          

This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified.
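A sketch of the same request with the async client:

    # Equivalent to GET my-index-000001/_doc/2?routing=user1
    doc = await client.get(index="my-index-000001", id="2", routing="user1")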

Distributed

The GET operation hashes the document ID into a specific shard ID and is then redirected to one of the replicas within that shard ID group, which returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be.

Versioning support

You can use the version parameter to retrieve the document only if its current version is equal to the specified one.

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
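For example, a sketch of a version-checked read (the version value is illustrative):

    # Returns the document only if its current version is 2;
    # otherwise the request fails with a version conflict.
    doc = await client.get(index="my-index-000001", id="1", version=2)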

``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. :param force_synthetic_source: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. :param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). :param routing: A custom value used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force_synthetic_source is not None: __query["force_synthetic_source"] = force_synthetic_source if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get", path_parts=__path_parts, ) @_rewrite_parameters() async def get_script( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a script or search template. Retrieves a stored script or search template.
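A minimal sketch (the script identifier is illustrative):

    # Sends GET /_scripts/my-search-template
    resp = await client.get_script(id="my-search-template")
    print(resp["script"])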

``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_scripts/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_script", path_parts=__path_parts, ) @_rewrite_parameters() async def get_script_context( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get script contexts.

Get a list of supported script contexts and their methods.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_script_context", path_parts=__path_parts, ) @_rewrite_parameters() async def get_script_languages( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get script languages.

Get a list of available script types, languages, and contexts.
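Both script introspection APIs take no required arguments; a sketch:

    contexts = await client.get_script_context()     # GET /_script_context
    languages = await client.get_script_languages()  # GET /_script_language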

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_script_languages", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) async def get_source( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a document's source.

Get the source of a document. For example:

GET my-index-000001/_source/1
          

You can use the source filtering parameters to control which parts of the _source are returned:

GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
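A sketch of the same filtered request with the async client:

    # Equivalent to GET my-index-000001/_source/1?_source_includes=*.id&_source_excludes=entities
    src = await client.get_source(
        index="my-index-000001",
        id="1",
        source_includes="*.id",
        source_excludes="entities",
    )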
          
``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. :param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). :param routing: A custom value used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_source/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_source", path_parts=__path_parts, ) @_rewrite_parameters() async def health_report( self, *, feature: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality.

Each indicator has a health status of: green, unknown, yellow or red. The indicator will provide an explanation and metadata describing the reason for its current health status.

The cluster’s status is controlled by the worst indicator status.

In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.

Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem.

NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.
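For example, a polling-friendly sketch that skips the expensive analysis:

    # verbose=False avoids the costly root cause analysis on each poll.
    report = await client.health_report(verbose=False)
    print(report["status"])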

``_ :param feature: A feature of the cluster, as returned by the top-level health report API. :param size: Limit the number of affected resources the health report API returns. :param timeout: Explicit operation timeout. :param verbose: Opt-in for more information about the health of the system. """ __path_parts: t.Dict[str, str] if feature not in SKIP_IN_PATH: __path_parts = {"feature": _quote(feature)} __path = f'/_health_report/{__path_parts["feature"]}' else: __path_parts = {} __path = "/_health_report" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if timeout is not None: __query["timeout"] = timeout if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="health_report", path_parts=__path_parts, ) @_rewrite_parameters( body_name="document", ) async def index( self, *, index: str, document: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a document in an index.

Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version.

NOTE: You cannot use this API to send update requests for existing documents in a data stream.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

  • To add or overwrite a document using the PUT /<target>/_doc/<_id> request format, you must have the create, index, or write index privilege.
  • To add a document using the POST /<target>/_doc/ request format, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set wait_for_active_shards to change this default behavior.

Automatically create data streams and indices

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.

Automatic index creation is controlled by the action.auto_create_index setting. If it is true, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns or set it to false to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with + or - to indicate whether it should be allowed or blocked. When a list is specified, the default behavior is to disallow.

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. It does not affect the creation of data streams.

Optimistic concurrency control

Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.
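A sketch of a conditional write (the sequence number and primary term are illustrative values read from an earlier get or search response):

    # Succeeds only if the document has not been modified since it was read.
    await client.index(
        index="my-index-000001",
        id="1",
        document={"user": {"id": "elkbee"}},
        if_seq_no=10,        # from the previous read
        if_primary_term=1,   # from the previous read
    )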

Routing

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.
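For example, a sketch routing a write on a user identifier instead of the document ID (the document contents are illustrative):

    await client.index(
        index="my-index-000001",
        document={"user": "user1"},
        routing="user1",
    )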

Distributed

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

Active shards

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. To alter this behavior per operation, use the wait_for_active_shards request parameter.

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.

For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.
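A sketch of requiring every copy to be active before the write proceeds (the document contents are illustrative):

    await client.index(
        index="my-index-000001",
        document={"message": "hello"},
        wait_for_active_shards="all",  # or an integer up to number_of_replicas+1
    )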

No operation (noop) updates

When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the _update API with detect_noop set to true. The detect_noop option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.

There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.

Versioning

Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, version_type should be set to external. The value provided must be a numeric, long value greater than or equal to 0, and less than around 9.2e+18.

NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks.

When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:

PUT my-index-000001/_doc/1?version=2&version_type=external
          {
            "user": {
              "id": "elkbee"
            }
          }

In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).

A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used. Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
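A sketch of the same externally versioned write with the async client:

    # Equivalent to PUT my-index-000001/_doc/1?version=2&version_type=external
    await client.index(
        index="my-index-000001",
        id="1",
        version=2,
        version_type="external",
        document={"user": {"id": "elkbee"}},
    )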
          
``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. :param document: :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. :param op_type: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this paramater defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. :param require_data_stream: If `true`, the request's actions must target a data stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param timeout: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. :param version: An explicit version number for concurrency control. It must be a non-negative long number. :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if document is None and body is None: raise ValueError( "Empty value passed for parameters 'document' and 'body', one of them should be set." 
) elif document is not None and body is not None: raise ValueError("Cannot set both 'document' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and id not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}' __method = "PUT" elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_doc' __method = "POST" else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error if op_type is not None: __query["op_type"] = op_type if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias if require_data_stream is not None: __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __body = document if document is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] __method, __path, params=__query, headers=__headers, body=__body, endpoint_id="index", path_parts=__path_parts, ) @_rewrite_parameters() async def info( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster info. Get basic build, version, and cluster information. NOTE: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "ids"), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) async def mget( self, *, index: t.Optional[str] = None, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force_synthetic_source: t.Optional[bool] = None, human: t.Optional[bool] = None, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get multiple documents.

Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.

Filter source fields

By default, the _source field is returned for every document (if stored). Use the _source and _source_includes or _source_excludes attributes to filter what fields are returned for a particular document. You can include the _source, _source_includes, and _source_excludes query parameters in the request URI to specify the defaults to use when there are no per-document instructions.

Get stored fields

Use the stored_fields attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the stored_fields query parameter in the request URI to specify the defaults to use when there are no per-document instructions.
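A minimal sketch fetching two documents by ID (the IDs are illustrative):

    resp = await client.mget(index="my-index-000001", ids=["1", "2"])
    for doc in resp["docs"]:
        print(doc["_id"], doc.get("found"))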

``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. :param docs: The documents you want to retrieve. Required if no index is specified in the request URI. :param force_synthetic_source: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. :param ids: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. :param preference: Specifies the node or shard the operation should be performed on. Random by default. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes relevant shards before retrieving documents. :param routing: Custom value used to route operations to a specific shard. :param source: True or false to return the `_source` field or not, or a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: If `true`, retrieves the document fields stored in the index rather than the document `_source`. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mget' else: __path_parts = {} __path = "/_mget" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force_synthetic_source is not None: __query["force_synthetic_source"] = force_synthetic_source if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if not __body: if docs is not None: __body["docs"] = docs if ids is not None: __body["ids"] = ids __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="mget", path_parts=__path_parts, ) @_rewrite_parameters( body_name="searches", ) async def msearch( self, *, searches: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = 
None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_named_queries_score: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run multiple searches.

The format of the request is similar to the bulk API and makes use of the newline-delimited JSON (NDJSON) format. The structure is as follows:

header\\n
          body\\n
          header\\n
          body\\n
          

This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.

IMPORTANT: The final line of data must end with a newline character \\n. Each newline character may be preceded by a carriage return \\r. When sending requests to this endpoint, the Content-Type header should be set to application/x-ndjson.
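With this client, the header/body pairs are passed as a flat list of dicts and the NDJSON serialization is handled for you; a sketch (the index names and query are illustrative):

    resp = await client.msearch(
        searches=[
            {"index": "my-index-000001"},                # header of search 1
            {"query": {"match_all": {}}},                # body of search 1
            {"index": "my-other-index"},                 # header of search 2
            {"query": {"match": {"message": "hello"}}},  # body of search 2
        ]
    )
    for item in resp["responses"]:
        print(item.get("status"), item.get("hits", {}).get("total"))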

``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases to search. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param include_named_queries_score: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. :param rest_total_hits_as_int: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. :param routing: Custom routing value used to route search operations to a specific shard. :param search_type: Indicates whether global term and document frequencies should be used when scoring returned documents. :param typed_keys: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. """ if searches is None and body is None: raise ValueError( "Empty value passed for parameters 'searches' and 'body', one of them should be set." 
) elif searches is not None and body is not None: raise ValueError("Cannot set both 'searches' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_msearch' else: __path_parts = {} __path = "/_msearch" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_named_queries_score is not None: __query["include_named_queries_score"] = include_named_queries_score if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys __body = searches if searches is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="msearch", path_parts=__path_parts, ) @_rewrite_parameters( body_name="search_templates", ) async def msearch_template( self, *, search_templates: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run multiple templated searches.

Run multiple templated searches with a single request. If you are providing a text file or text input to curl, use the --data-binary flag instead of -d to preserve newlines. For example:

$ cat requests
          { "index": "my-index" }
          { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
          { "index": "my-other-index" }
          { "id": "my-other-search-template", "params": { "query_type": "match_all" }}

          $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
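
For comparison, here is a minimal sketch of the same request through this client, assuming an AsyncElasticsearch instance named `es` (the index and template IDs are illustrative):

          resp = await es.msearch_template(
              search_templates=[
                  {"index": "my-index"},
                  {"id": "my-search-template", "params": {"query_string": "hello world", "from": 0, "size": 10}},
              ],
          )
          for item in resp["responses"]:
              print(item.get("hits", {}).get("total"))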
          
``_ :param search_templates: :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. :param max_concurrent_searches: The maximum number of concurrent searches the API can run. :param rest_total_hits_as_int: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. :param search_type: The type of the search operation. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. """ if search_templates is None and body is None: raise ValueError( "Empty value passed for parameters 'search_templates' and 'body', one of them should be set." ) elif search_templates is not None and body is not None: raise ValueError("Cannot set both 'search_templates' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_msearch/template' else: __path_parts = {} __path = "/_msearch/template" __query: t.Dict[str, t.Any] = {} if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys __body = search_templates if search_templates is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="msearch_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "ids"), ) async def mtermvectors( self, *, index: t.Optional[str] = None, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, field_statistics: t.Optional[bool] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ids: t.Optional[t.Sequence[str]] = None, offsets: t.Optional[bool] = None, payloads: t.Optional[bool] = None, positions: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, routing: t.Optional[str] = None, term_statistics: t.Optional[bool] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get multiple term vectors.

Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a docs array with all the fetched termvectors. Each element has the structure provided by the termvectors API.

Artificial documents

You can also use mtermvectors to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified _index.
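
A minimal sketch of a call through this client, assuming an AsyncElasticsearch instance named `es` (the index, document IDs, and field names are illustrative):

          resp = await es.mtermvectors(
              index="my-index",
              ids=["1", "2"],
              fields=["message"],
              term_statistics=True,
          )
          for doc in resp["docs"]:
              print(doc["_id"], list(doc.get("term_vectors", {})))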

``_ :param index: The name of the index that contains the documents. :param docs: An array of existing or artificial documents. :param field_statistics: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. :param fields: A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. :param ids: A simplified syntax to specify documents by their ID if they're in the same index. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. :param positions: If `true`, the response includes term positions. :param preference: The node or shard the operation should be performed on. It is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. :param routing: A custom value used to route operations to a specific shard. :param term_statistics: If true, the response includes term frequency and document frequency. :param version: If `true`, returns the document version as part of a hit. :param version_type: The version type. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mtermvectors' else: __path_parts = {} __path = "/_mtermvectors" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if field_statistics is not None: __query["field_statistics"] = field_statistics if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if offsets is not None: __query["offsets"] = offsets if payloads is not None: __query["payloads"] = payloads if positions is not None: __query["positions"] = positions if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if routing is not None: __query["routing"] = routing if term_statistics is not None: __query["term_statistics"] = term_statistics if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if not __body: if docs is not None: __body["docs"] = docs if ids is not None: __body["ids"] = ids if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="mtermvectors", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("index_filter",), ) async def open_point_in_time( self, *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, index_filter: 
t.Optional[t.Mapping[str, t.Any]] = None, max_concurrent_shard_requests: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Open a point in time.

A search request by default runs against the most recent visible data of the target indices; this is called the point in time. An Elasticsearch PIT (point in time) is a lightweight view into the state of the data as it existed when it was initiated. In some cases, it is preferable to perform multiple search requests using the same point in time. For example, if refreshes happen between search_after requests, the results of those requests might not be consistent because changes happening between searches are visible only to the more recent point in time.

A point in time must be opened explicitly before being used in search requests.

A subsequent search request with the pit parameter must not specify index, routing, or preference values as these parameters are copied from the point in time.

Just like regular searches, you can use from and size to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with search_after.

IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.

When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a NoShardAvailableActionException exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.

Keeping point in time alive

The keep_alive parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data; it just needs to be long enough for the next request.

Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed, they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use.

TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles.

Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
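
A minimal sketch of the open/search/close lifecycle with this client, assuming an AsyncElasticsearch instance named `es` (the index name is illustrative):

          pit = await es.open_point_in_time(index="my-index", keep_alive="1m")
          try:
              # Searches that use a PIT must not specify an index in the path.
              resp = await es.search(
                  pit={"id": pit["id"], "keep_alive": "1m"},
                  query={"match_all": {}},
              )
          finally:
              await es.close_point_in_time(id=pit["id"])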

``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extend the length of time that the point in time persists. :param allow_partial_search_results: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param preference: The node or shard the operation should be performed on. By default, it is random. :param routing: A custom value that is used to route operations to a specific shard. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if keep_alive is None and body is None: raise ValueError("Empty value passed for parameter 'keep_alive'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_pit' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if routing is not None: __query["routing"] = routing if not __body: if index_filter is not None: __body["index_filter"] = index_filter if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="open_point_in_time", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("script",), ) async def put_script( self, *, id: str, script: t.Optional[t.Mapping[str, t.Any]] = None, context: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> 
ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a script or search template. Creates or updates a stored script or search template.
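
A minimal sketch of storing a Painless script with this client, assuming an AsyncElasticsearch instance named `es` (the script ID and source are illustrative):

          await es.put_script(
              id="my-stored-script",
              script={
                  "lang": "painless",
                  "source": "Math.log(_score * 2) + params['my_modifier']",
              },
          )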

``_ :param id: The identifier for the stored script or search template. It must be unique within the cluster. :param script: The script or search template, its parameters, and its language. :param context: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if script is None and body is None: raise ValueError("Empty value passed for parameter 'script'") __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH and context not in SKIP_IN_PATH: __path_parts = {"id": _quote(id), "context": _quote(context)} __path = f'/_scripts/{__path_parts["id"]}/{__path_parts["context"]}' elif id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_scripts/{__path_parts["id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if script is not None: __body["script"] = script __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="put_script", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("requests", "metric"), ) async def rank_eval( self, *, requests: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, metric: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, search_type: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Evaluate ranked search results.

Evaluate the quality of ranked search results over a set of typical search queries.
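
A minimal sketch with this client, assuming an AsyncElasticsearch instance named `es` (the index, query, and ratings are illustrative):

          resp = await es.rank_eval(
              index="my-index",
              requests=[
                  {
                      "id": "hello_query",
                      "request": {"query": {"match": {"text": "hello"}}},
                      "ratings": [{"_index": "my-index", "_id": "1", "rating": 1}],
                  }
              ],
              metric={"precision": {"k": 10, "relevant_rating_threshold": 1}},
          )
          print(resp["metric_score"])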

``_ :param requests: A set of typical search requests, together with their provided ratings. :param index: A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param metric: Definition of the evaluation metric to calculate. :param search_type: Search operation type """ if requests is None and body is None: raise ValueError("Empty value passed for parameter 'requests'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_rank_eval' else: __path_parts = {} __path = "/_rank_eval" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if search_type is not None: __query["search_type"] = search_type if not __body: if requests is not None: __body["requests"] = requests if metric is not None: __body["metric"] = metric __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="rank_eval", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("dest", "source", "conflicts", "max_docs", "script", "size"), ) async def reindex( self, *, dest: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_docs: t.Optional[int] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, require_alias: t.Optional[bool] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, size: t.Optional[int] = None, slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reindex documents.

Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself.

IMPORTANT: Reindex requires _source to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time.

If the Elasticsearch security features are enabled, you must have the following security privileges:

  • The read index privilege for the source data stream, index, or alias.
  • The write index privilege for the destination data stream, index, or index alias.
  • To automatically create a data stream or index with a reindex API request, you must have the auto_configure, create_index, or manage index privilege for the destination data stream, index, or alias.
  • If reindexing from a remote cluster, the source.remote.user must have the monitor cluster privilege and the read index privilege for the source data stream, index, or alias.

If reindexing from a remote cluster, you must explicitly allow the remote host in the reindex.remote.whitelist setting. Automatic data stream creation requires a matching index template with data stream enabled.

The dest element can be configured like the index API to control optimistic concurrency control. Omitting version_type or setting it to internal causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.

Setting version_type to external causes Elasticsearch to preserve the version from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.

Setting op_type to create causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict.

IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an op_type of create. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream.

By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the conflicts request body property to proceed. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the conflicts property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than max_docs until it has successfully indexed max_docs documents into the target or it has gone through every document in the source query.

It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.

  • When requested with wait_for_completion=true (default), the request fails if the node shuts down.
  • When requested with wait_for_completion=false, a task id is returned for use with the task management APIs. The task may disappear or fail if the node shuts down. When retrying a failed reindex operation, it might be necessary to set conflicts=proceed or to first delete the partial destination index. Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.

Refer to the linked documentation for examples of how to reindex documents.
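
As one such example, a minimal sketch of an asynchronous reindex with this client, assuming an AsyncElasticsearch instance named `es` (the index names are illustrative):

          resp = await es.reindex(
              source={"index": "my-index"},
              dest={"index": "my-new-index", "op_type": "create"},
              conflicts="proceed",
              wait_for_completion=False,
          )
          task_id = resp["task"]  # poll this with the tasks API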

``_ :param dest: The destination you are copying to. :param source: The source you are copying from. :param conflicts: Indicates whether to continue reindexing even when there are conflicts. :param max_docs: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less then or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. :param refresh: If `true`, the request refreshes affected shards to make this operation visible to search. :param requests_per_second: The throttle for this request in sub-requests per second. By default, there is no throttle. :param require_alias: If `true`, the destination must be an index alias. :param script: The script to run to update the document source or metadata when reindexing. :param scroll: The period of time that a consistent view of the index should be maintained for scrolled search. :param size: :param slices: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. :param timeout: The period each indexing waits for automatic index creation, dynamic mapping updates, and waiting for active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. :param wait_for_completion: If `true`, the request blocks until the operation is complete. 
""" if dest is None and body is None: raise ValueError("Empty value passed for parameter 'dest'") if source is None and body is None: raise ValueError("Empty value passed for parameter 'source'") __path_parts: t.Dict[str, str] = {} __path = "/_reindex" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if requests_per_second is not None: __query["requests_per_second"] = requests_per_second if require_alias is not None: __query["require_alias"] = require_alias if scroll is not None: __query["scroll"] = scroll if slices is not None: __query["slices"] = slices if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if dest is not None: __body["dest"] = dest if source is not None: __body["source"] = source if conflicts is not None: __body["conflicts"] = conflicts if max_docs is not None: __body["max_docs"] = max_docs if script is not None: __body["script"] = script if size is not None: __body["size"] = size __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="reindex", path_parts=__path_parts, ) @_rewrite_parameters() async def reindex_rethrottle( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Throttle a reindex operation.

Change the number of requests per second for a particular reindex operation. For example:

POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
          

Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts.
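
A minimal sketch of the same call with this client, assuming an AsyncElasticsearch instance named `es` (the task identifier is the one from the example above):

          await es.reindex_rethrottle(
              task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
              requests_per_second=-1,  # -1 turns throttling off
          )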

``_ :param task_id: The task identifier, which can be found by using the tasks API. :param requests_per_second: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_reindex/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if requests_per_second is not None: __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="reindex_rethrottle", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("file", "params", "source"), ignore_deprecated_options={"params"}, ) async def render_search_template( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, file: t.Optional[str] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Render a search template.

Render a search template as a search request body.
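
A minimal sketch with this client, assuming an AsyncElasticsearch instance named `es` and a stored template with the illustrative ID "my-search-template":

          resp = await es.render_search_template(
              id="my-search-template",
              params={"query_string": "hello world", "from": 0, "size": 10},
          )
          print(resp["template_output"])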

``_ :param id: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. :param file: :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. :param source: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_render/template/{__path_parts["id"]}' else: __path_parts = {} __path = "/_render/template" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if file is not None: __body["file"] = file if params is not None: __body["params"] = params if source is not None: __body["source"] = source if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="render_search_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("context", "context_setup", "script"), ) @_stability_warning(Stability.EXPERIMENTAL) async def scripts_painless_execute( self, *, context: t.Optional[ t.Union[ str, t.Literal[ "boolean_field", "composite_field", "date_field", "double_field", "filter", "geo_point_field", "ip_field", "keyword_field", "long_field", "painless_test", "score", ], ] ] = None, context_setup: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a script.

Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.

The API uses several contexts, which control how scripts are run, what variables are available at runtime, and what the return type is.

Each context requires a script, but additional parameters depend on the context you're using for that script.
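
A minimal sketch using the default painless_test context, assuming an AsyncElasticsearch instance named `es` (the script and parameters are illustrative):

          resp = await es.scripts_painless_execute(
              script={
                  "source": "params.count / params.total",
                  "params": {"count": 100.0, "total": 1000.0},
              },
          )
          print(resp["result"])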

``_ :param context: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. :param context_setup: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. :param script: The Painless script to run. """ __path_parts: t.Dict[str, str] = {} __path = "/_scripts/painless/_execute" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if context is not None: __body["context"] = context if context_setup is not None: __body["context_setup"] = context_setup if script is not None: __body["script"] = script if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="scripts_painless_execute", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("scroll_id", "scroll"), ) async def scroll( self, *, scroll_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a scrolling search.

IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the search_after parameter with a point in time (PIT).

The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the scroll query parameter. The scroll parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the _scroll_id response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.

You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.

IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
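
A minimal sketch of a full scroll loop with this client, assuming an AsyncElasticsearch instance named `es` (the index name is illustrative):

          resp = await es.search(index="my-index", scroll="1m", query={"match_all": {}})
          while resp["hits"]["hits"]:
              # process resp["hits"]["hits"] here, then fetch the next batch
              resp = await es.scroll(scroll_id=resp["_scroll_id"], scroll="1m")
          await es.clear_scroll(scroll_id=resp["_scroll_id"])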

``_ :param scroll_id: The scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. :param scroll: The period to retain the search context for scrolling. """ if scroll_id is None and body is None: raise ValueError("Empty value passed for parameter 'scroll_id'") __path_parts: t.Dict[str, str] = {} __path = "/_search/scroll" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if not __body: if scroll_id is not None: __body["scroll_id"] = scroll_id if scroll is not None: __body["scroll"] = scroll if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="scroll", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "collapse", "docvalue_fields", "explain", "ext", "fields", "from_", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", "from": "from_", }, ) async def search( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, batched_reduce_size: t.Optional[int] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, collapse: t.Optional[t.Mapping[str, t.Any]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, ext: t.Optional[t.Mapping[str, t.Any]] = None, fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force_synthetic_source: t.Optional[bool] = None, from_: t.Optional[int] = None, highlight: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_named_queries_score: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, knn: t.Optional[ 
t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, rank: t.Optional[t.Mapping[str, t.Any]] = None, request_cache: t.Optional[bool] = None, rescore: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, retriever: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, seq_no_primary_term: t.Optional[bool] = None, size: t.Optional[int] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stats: t.Optional[t.Sequence[str]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, suggest: t.Optional[t.Mapping[str, t.Any]] = None, suggest_field: t.Optional[str] = None, suggest_mode: t.Optional[ t.Union[str, t.Literal["always", "missing", "popular"]] ] = None, suggest_size: t.Optional[int] = None, suggest_text: t.Optional[str] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[str] = None, track_scores: t.Optional[bool] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, typed_keys: t.Optional[bool] = None, version: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a search.

Get search hits that match the query defined in the request. You can provide search queries using the q query string parameter or the request body. If both are specified, only the q query string parameter is used.

If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the read index privilege for the alias's data streams or indices.

Search slicing

When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the slice and pit properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.

For instance, if the number of shards is equal to 2 and you request 4 slices, slices 0 and 2 are assigned to the first shard and slices 1 and 3 are assigned to the second shard.

IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
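
A minimal sketch of a basic search with this client, assuming an AsyncElasticsearch instance named `es` (the index and query are illustrative):

          resp = await es.search(
              index="my-index",
              query={"match": {"message": "hello world"}},
              from_=0,
              size=10,
          )
          print(resp["hits"]["total"])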

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. :param aggregations: Defines the aggregations that are run as part of the search request. :param aggs: Defines the aggregations that are run as part of the search request. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param allow_partial_search_results: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param batched_reduce_size: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. :param ccs_minimize_roundtrips: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. :param collapse: Collapses search results the values of the specified field. :param default_operator: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param docvalue_fields: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`. :param explain: If `true`, the request returns detailed information about score computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. :param fields: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. :param force_synthetic_source: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. :param from_: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. 
:param highlight: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. :param ignore_throttled: If `true`, concrete, expanded or aliased indices will be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_named_queries_score: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. :param indices_boost: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. :param knn: The approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param max_concurrent_shard_requests: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. :param min_score: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations. :param pit: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. :param post_filter: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. :param pre_filter_shard_size: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. :param preference: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:,` to run the search only on the specified shards. 
You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. :param profile: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. :param q: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. :param query: The search definition using the Query DSL. :param rank: The Reciprocal Rank Fusion (RRF) to use. :param request_cache: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. :param rescore: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. :param rest_total_hits_as_int: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. :param retriever: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. :param routing: A custom value that is used to route operations to a specific shard. :param runtime_mappings: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. :param scroll: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. :param search_after: Used to retrieve the next page of hits using a set of sort values from the previous page. :param search_type: Indicates how distributed term frequencies are calculated for relevance scoring. :param seq_no_primary_term: If `true`, the request returns sequence number and primary term of the last modification of each hit. :param size: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. :param slice: Split a scrolled search into multiple slices that can be consumed independently. :param sort: A comma-separated list of : pairs. :param source: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. 
You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stats: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. :param stored_fields: A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. :param suggest: Defines a suggester that provides similar looking terms based on a provided text. :param suggest_field: The field to use for suggestions. :param suggest_mode: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param suggest_size: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param suggest_text: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. :param timeout: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param track_scores: If `true`, calculate and return document scores, even if the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. :param typed_keys: If `true`, aggregation and suggester names are be prefixed by their respective types in the response. :param version: If `true`, the request returns the document version as part of a hit. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_search' else: __path_parts = {} __path = "/_search" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. 
if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if batched_reduce_size is not None: __query["batched_reduce_size"] = batched_reduce_size if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if force_synthetic_source is not None: __query["force_synthetic_source"] = force_synthetic_source if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_named_queries_score is not None: __query["include_named_queries_score"] = include_named_queries_score if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if request_cache is not None: __query["request_cache"] = request_cache if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if suggest_field is not None: __query["suggest_field"] = suggest_field if suggest_mode is not None: __query["suggest_mode"] = suggest_mode if suggest_size is not None: __query["suggest_size"] = suggest_size if suggest_text is not None: __query["suggest_text"] = suggest_text if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if collapse is not None: __body["collapse"] = collapse if docvalue_fields is not None: __body["docvalue_fields"] = docvalue_fields if explain is not None: __body["explain"] = explain if ext is not None: __body["ext"] = ext if fields is not None: __body["fields"] = fields if from_ is not None: __body["from"] = from_ if highlight is not None: __body["highlight"] = highlight if indices_boost is not None: __body["indices_boost"] = indices_boost if knn is not None: __body["knn"] = knn if min_score is not None: __body["min_score"] = min_score if pit is not None: __body["pit"] = pit if post_filter is not None: __body["post_filter"] = post_filter if profile is not None: __body["profile"] = 
profile if query is not None: __body["query"] = query if rank is not None: __body["rank"] = rank if rescore is not None: __body["rescore"] = rescore if retriever is not None: __body["retriever"] = retriever if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if search_after is not None: __body["search_after"] = search_after if seq_no_primary_term is not None: __body["seq_no_primary_term"] = seq_no_primary_term if size is not None: __body["size"] = size if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort if source is not None: __body["_source"] = source if stats is not None: __body["stats"] = stats if stored_fields is not None: __body["stored_fields"] = stored_fields if suggest is not None: __body["suggest"] = suggest if terminate_after is not None: __body["terminate_after"] = terminate_after if timeout is not None: __body["timeout"] = timeout if track_scores is not None: __body["track_scores"] = track_scores if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggs", "buffer", "exact_bounds", "extent", "fields", "grid_agg", "grid_precision", "grid_type", "query", "runtime_mappings", "size", "sort", "track_total_hits", "with_labels", ), ) async def search_mvt( self, *, index: t.Union[str, t.Sequence[str]], field: str, zoom: int, x: int, y: int, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, buffer: t.Optional[int] = None, error_trace: t.Optional[bool] = None, exact_bounds: t.Optional[bool] = None, extent: t.Optional[int] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, grid_agg: t.Optional[t.Union[str, t.Literal["geohex", "geotile"]]] = None, grid_precision: t.Optional[int] = None, grid_type: t.Optional[ t.Union[str, t.Literal["centroid", "grid", "point"]] ] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, with_labels: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ .. raw:: html

Search a vector tile.

Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary Mapbox vector tile.

Internally, Elasticsearch translates a vector tile search API request into a search containing:

  • A geo_bounding_box query on the <field>. The query uses the <zoom>/<x>/<y> tile as a bounding box.
  • A geotile_grid or geohex_grid aggregation on the <field>. The grid_agg parameter determines the aggregation type. The aggregation uses the <zoom>/<x>/<y> tile as a bounding box.
  • Optionally, a geo_bounds aggregation on the <field>. The search only includes this aggregation if the exact_bounds parameter is true.
  • If the optional parameter with_labels is true, the internal search will include a dynamic runtime field that calls the getLabelPosition function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.

The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:

  • A hits layer containing a feature for each <field> value matching the geo_bounding_box query.
  • An aggs layer containing a feature for each cell of the geotile_grid or geohex_grid. The layer only contains features for cells with matching data.
  • A meta layer containing:
    • A feature containing a bounding box. By default, this is the bounding box of the tile.
    • Value ranges for any sub-aggregations on the geotile_grid or geohex_grid.
    • Metadata for the search.

The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON.

IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence.

Grid precision for geotile

For a grid_agg of geotile, you can use cells in the aggs layer as tiles for lower zoom levels. grid_precision represents the additional zoom levels available through these cells. The final precision is computed as follows: <zoom> + grid_precision. For example, if <zoom> is 7 and grid_precision is 8, then the geotile_grid aggregation will use a precision of 15. The maximum final precision is 29. The grid_precision also determines the number of cells for the grid as follows: (2^grid_precision) x (2^grid_precision). For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The aggs layer only contains features for cells with matching data.

Grid precision for geohex

For a grid_agg of geohex, Elasticsearch uses <zoom> and grid_precision to calculate a final precision as follows: <zoom> + grid_precision.

This precision determines the H3 resolution of the hexagonal cells produced by the geohex aggregation. The following table maps the H3 resolution for each precision. For example, if <zoom> is 3 and grid_precision is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If <zoom> is 3 and grid_precision is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3.

Precision | Unique tile bins | H3 resolution | Unique hex bins | Ratio
1 | 4 | 0 | 122 | 30.5
2 | 16 | 0 | 122 | 7.625
3 | 64 | 1 | 842 | 13.15625
4 | 256 | 1 | 842 | 3.2890625
5 | 1024 | 2 | 5882 | 5.744140625
6 | 4096 | 2 | 5882 | 1.436035156
7 | 16384 | 3 | 41162 | 2.512329102
8 | 65536 | 3 | 41162 | 0.6280822754
9 | 262144 | 4 | 288122 | 1.099098206
10 | 1048576 | 4 | 288122 | 0.2747745514
11 | 4194304 | 5 | 2016842 | 0.4808526039
12 | 16777216 | 6 | 14117882 | 0.8414913416
13 | 67108864 | 6 | 14117882 | 0.2103728354
14 | 268435456 | 7 | 98825162 | 0.3681524172
15 | 1073741824 | 8 | 691776122 | 0.644266719
16 | 4294967296 | 8 | 691776122 | 0.1610666797
17 | 17179869184 | 9 | 4842432842 | 0.2818666889
18 | 68719476736 | 10 | 33897029882 | 0.4932667053
19 | 274877906944 | 11 | 237279209162 | 0.8632167343
20 | 1099511627776 | 11 | 237279209162 | 0.2158041836
21 | 4398046511104 | 12 | 1660954464122 | 0.3776573213
22 | 17592186044416 | 13 | 11626681248842 | 0.6609003122
23 | 70368744177664 | 13 | 11626681248842 | 0.165225078
24 | 281474976710656 | 14 | 81386768741882 | 0.2891438866
25 | 1125899906842620 | 15 | 569707381193162 | 0.5060018015
26 | 4503599627370500 | 15 | 569707381193162 | 0.1265004504
27 | 18014398509482000 | 15 | 569707381193162 | 0.03162511259
28 | 72057594037927900 | 15 | 569707381193162 | 0.007906278149
29 | 288230376151712000 | 15 | 569707381193162 | 0.001976569537

Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.

Learn how to use the vector tile search API with practical examples in the Vector tile search examples guide.
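As a quick illustration, here is a minimal sketch of a tile request (the index name `museums`, the field `location`, and the tile coordinates are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    # Request the tile at zoom 13, x=4207, y=2692 for the "location" geo field
    resp = await client.search_mvt(
        index="museums",
        field="location",
        zoom=13,
        x=4207,
        y=2692,
        grid_precision=2,
        fields=["name"],
    )
    # The response body is the protobuf-encoded vector tile
    with open("tile.pbf", "wb") as f:
        f.write(resp.body)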

``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return :param zoom: Zoom level for the vector tile to search :param x: X coordinate for the vector tile to search :param y: Y coordinate for the vector tile to search :param aggs: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. :param buffer: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. :param exact_bounds: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. :param extent: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. :param fields: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. :param grid_agg: The aggregation used to create a grid for the `field`. :param grid_precision: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. :param grid_type: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell. :param query: The query DSL used to filter documents for the search. :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param size: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. :param sort: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. :param track_total_hits: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. :param with_labels: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. 
All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if field in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'field'") if zoom in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'zoom'") if x in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'x'") if y in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'y'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "field": _quote(field), "zoom": _quote(zoom), "x": _quote(x), "y": _quote(y), } __path = f'/{__path_parts["index"]}/_mvt/{__path_parts["field"]}/{__path_parts["zoom"]}/{__path_parts["x"]}/{__path_parts["y"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if aggs is not None: __body["aggs"] = aggs if buffer is not None: __body["buffer"] = buffer if exact_bounds is not None: __body["exact_bounds"] = exact_bounds if extent is not None: __body["extent"] = extent if fields is not None: __body["fields"] = fields if grid_agg is not None: __body["grid_agg"] = grid_agg if grid_precision is not None: __body["grid_precision"] = grid_precision if grid_type is not None: __body["grid_type"] = grid_type if query is not None: __body["query"] = query if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if with_labels is not None: __body["with_labels"] = with_labels if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/vnd.mapbox-vector-tile"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_mvt", path_parts=__path_parts, ) @_rewrite_parameters() async def search_shards( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the search shards.

Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section.

If the Elasticsearch security features are enabled, you must have the view_index_metadata or manage index privilege for the target data stream, index, or alias.
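For example, a minimal sketch (the index name and routing value are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.search_shards(index="my-index-000001", routing="user1")
    # Each entry in "shards" is one group of shard copies the search would use
    for shard_group in resp["shards"]:
        print([copy["shard"] for copy in shard_group])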

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never time out. :param preference: The node or shard the operation should be performed on. It is random by default. :param routing: A custom value used to route operations to a specific shard. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_search_shards' else: __path_parts = {} __path = "/_search_shards" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if routing is not None: __query["routing"] = routing __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="search_shards", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("explain", "id", "params", "profile", "source"), ignore_deprecated_options={"params"}, ) async def search_template( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, id: t.Optional[str] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, 
rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a search with a search template.
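A minimal sketch using an inline template (the index name, template, and parameters are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.search_template(
        index="my-index",
        source={
            "query": {"match": {"message": "{{query_string}}"}},
            "size": "{{my_size}}",
        },
        params={"query_string": "hello world", "my_size": 5},
    )
    print(resp["hits"]["total"])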

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param explain: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. :param id: The ID of the search template to use. If no `source` is specified, this parameter is required. :param ignore_throttled: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. :param preference: The node or shard the operation should be performed on. It is random by default. :param profile: If `true`, the query execution is profiled. :param rest_total_hits_as_int: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. :param routing: A custom value used to route operations to a specific shard. :param scroll: Specifies how long a consistent view of the index should be maintained for scrolled search. :param search_type: The type of the search operation. :param source: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_search/template' else: __path_parts = {} __path = "/_search/template" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if explain is not None: __body["explain"] = explain if id is not None: __body["id"] = id if params is not None: __body["params"] = params if profile is not None: __body["profile"] = profile if source is not None: __body["source"] = source __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "field", "case_insensitive", "index_filter", "search_after", "size", "string", "timeout", ), ) async def terms_enum( self, *, index: str, field: t.Optional[str] = None, case_insensitive: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, search_after: t.Optional[str] = None, size: t.Optional[int] = None, string: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get terms in an index.

Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios.

NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
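For example, a minimal autocomplete-style lookup (the index and field names are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.terms_enum(
        index="stackoverflow",
        field="tags",
        string="kiba",
    )
    print(resp["terms"])  # for example: ["kibana"]
    print(resp["complete"])  # False if the timeout was exceeded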

``_ :param index: A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. :param field: The field whose indexed terms should be enumerated. :param case_insensitive: When `true`, the provided search string is matched against index terms without case sensitivity. :param index_filter: Filter an index shard if the provided query rewrites to `match_none`. :param search_after: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. :param size: The number of matching terms to return. :param string: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. NOTE: The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. :param timeout: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if field is None and body is None: raise ValueError("Empty value passed for parameter 'field'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_terms_enum' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if field is not None: __body["field"] = field if case_insensitive is not None: __body["case_insensitive"] = case_insensitive if index_filter is not None: __body["index_filter"] = index_filter if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if string is not None: __body["string"] = string if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="terms_enum", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "doc", "field_statistics", "fields", "filter", "offsets", "payloads", "per_field_analyzer", "positions", "routing", "term_statistics", "version", "version_type", ), ) async def termvectors( self, *, index: str, id: t.Optional[str] = None, doc: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, field_statistics: t.Optional[bool] = None, fields: t.Optional[t.Sequence[str]] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, offsets: t.Optional[bool] = None, payloads: t.Optional[bool] = None, per_field_analyzer: t.Optional[t.Mapping[str, str]] = None, positions: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, routing: t.Optional[str] = 
None, term_statistics: t.Optional[bool] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get term vector information.

Get information and statistics about terms in the fields of a particular document.

You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the fields parameter or by adding the fields to the request body. For example:

GET /my-index-000001/_termvectors/1?fields=message
          

Fields can be specified using wildcards, similar to the multi match query.

Term vectors are real-time by default, not near real-time. This can be changed by setting the realtime parameter to false.

You can request three types of values: term information, term statistics, and field statistics. By default, all term information and field statistics are returned for all fields but term statistics are excluded.

Term information

  • term frequency in the field (always returned)
  • term positions (positions: true)
  • start and end offsets (offsets: true)
  • term payloads (payloads: true), as base64 encoded bytes

If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.
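For example, a sketch that requests term vectors for an artificial document (the index and field names are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.termvectors(
        index="my-index-000001",
        doc={"message": "the quick brown fox"},
        fields=["message"],
        term_statistics=True,
    )
    # Per-term statistics for the "message" field
    print(resp["term_vectors"]["message"]["terms"])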

WARNING: Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.

Behaviour

The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use routing only to hit a particular shard. Refer to the linked documentation for detailed examples of how to use this API.

``_ :param index: The name of the index that contains the document. :param id: A unique identifier for the document. :param doc: An artificial document (a document not present in the index) for which you want to retrieve term vectors. :param field_statistics: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). :param fields: A list of the fields to return term vectors for. It supports wildcard expressions (`*`). :param filter: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. :param per_field_analyzer: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. :param positions: If `true`, the response includes term positions. :param preference: The node or shard the operation should be performed on. It is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. :param routing: A custom value that is used to route operations to a specific shard. :param term_statistics: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. :param version: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and id not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_termvectors/{__path_parts["id"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_termvectors' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if not __body: if doc is not None: __body["doc"] = doc if field_statistics is not None: __body["field_statistics"] = field_statistics if fields is not None: __body["fields"] = fields if filter is not None: __body["filter"] = filter if offsets is not None: __body["offsets"] = offsets if payloads is not None: __body["payloads"] = payloads if per_field_analyzer is not None: __body["per_field_analyzer"] = per_field_analyzer if positions is not None: __body["positions"] = positions if routing is not None: __body["routing"] = routing if term_statistics is not None: __body["term_statistics"] = term_statistics if version is not None: __body["version"] = version if version_type is not None: __body["version_type"] = version_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="termvectors", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "detect_noop", "doc", "doc_as_upsert", "script", "scripted_upsert", "source", "upsert", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) async def update( self, *, index: str, id: str, detect_noop: t.Optional[bool] = None, doc: t.Optional[t.Mapping[str, t.Any]] = None, doc_as_upsert: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, lang: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, retry_on_conflict: t.Optional[int] = None, routing: t.Optional[str] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, scripted_upsert: t.Optional[bool] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, upsert: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> 
ObjectApiResponse[t.Any]: """ .. raw:: html

Update a document.

Update a document by running a script or passing a partial document.

If the Elasticsearch security features are enabled, you must have the index or write index privilege for the target index or index alias.

The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation:

  • Gets the document (collocated with the shard) from the index.
  • Runs the specified script.
  • Indexes the result.

The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.

The _source field must be enabled to use this API. In addition to _source, you can access the following variables through the ctx map: _index, _type, _id, _version, _routing, and _now (the current timestamp). For usage examples such as partial updates, upserts, and scripted updates, see the External documentation.
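For example, a minimal sketch of a scripted update followed by a partial-document update (the index, ID, and field names are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    # Increment a counter with a script
    await client.update(
        index="test",
        id="1",
        script={
            "source": "ctx._source.counter += params.count",
            "params": {"count": 4},
        },
    )
    # Merge a partial document into the existing one
    await client.update(index="test", id="1", doc={"name": "new_name"})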

``_ :param index: The name of the target index. By default, the index is created automatically if it doesn't exist. :param id: A unique identifier for the document to be updated. :param detect_noop: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. :param doc: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. :param doc_as_upsert: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param include_source_on_error: If `true`, the document source is included in the error message if there are parsing errors. :param lang: The script language. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. :param retry_on_conflict: The number of times the operation should be retried when a conflict occurs. :param routing: A custom value used to route operations to a specific shard. :param script: The script to run to update the document. :param scripted_upsert: If `true`, run the script whether or not the document exists. :param source: If `false`, turn off source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. :param source_excludes: The source fields you want to exclude. :param source_includes: The source fields you want to retrieve. :param timeout: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. :param upsert: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. :param wait_for_active_shards: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_update/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error if lang is not None: __query["lang"] = lang if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias if retry_on_conflict is not None: __query["retry_on_conflict"] = retry_on_conflict if routing is not None: __query["routing"] = routing if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if detect_noop is not None: __body["detect_noop"] = detect_noop if doc is not None: __body["doc"] = doc if doc_as_upsert is not None: __body["doc_as_upsert"] = doc_as_upsert if script is not None: __body["script"] = script if scripted_upsert is not None: __body["scripted_upsert"] = scripted_upsert if source is not None: __body["_source"] = source if upsert is not None: __body["upsert"] = upsert __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="update", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("conflicts", "max_docs", "query", "script", "slice"), parameter_aliases={"from": "from_"}, ) async def update_by_query( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, lenient: t.Optional[bool] = None, max_docs: t.Optional[int] = None, pipeline: t.Optional[str] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, routing: t.Optional[str] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, scroll: t.Optional[t.Union[str, 
t.Literal[-1], t.Literal[0]]] = None, scroll_size: t.Optional[int] = None, search_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None, sort: t.Optional[t.Sequence[str]] = None, stats: t.Optional[t.Sequence[str]] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[bool] = None, version_type: t.Optional[bool] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update documents. Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

  • read
  • index or write

You can specify the query criteria in the request URI or the request body using the same syntax as the search API.

When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than max_docs until it has successfully updated max_docs documents or it has gone through every document in the source query.
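For example, a minimal sketch that counts version conflicts instead of aborting (the index, query, and script are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.update_by_query(
        index="my-index-000001",
        query={"term": {"user.id": "kimchy"}},
        script={"source": "ctx._source.count++", "lang": "painless"},
        conflicts="proceed",
    )
    print(resp["updated"], resp["version_conflicts"])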

NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.

While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick; they are not rolled back.

Refreshing shards

Specifying the refresh parameter refreshes all shards once the request completes. This is different to the update API's refresh parameter, which causes only the shard that received the request to be refreshed. Unlike the update API, it does not support wait_for.

Running update by query asynchronously

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at .tasks/task/${taskId}.
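For example, a sketch that launches the operation as a task and checks on it later (the index name is illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.update_by_query(
        index="my-index-000001",
        conflicts="proceed",
        wait_for_completion=False,
    )
    task_id = resp["task"]
    # Use the tasks API to monitor or cancel the operation
    status = await client.tasks.get(task_id=task_id)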

Waiting for active shards

wait_for_active_shards controls how many copies of a shard must be active before proceeding with the request. See wait_for_active_shards for details. timeout controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the Bulk API. Update by query uses scrolled searches, so you can also specify the scroll parameter to control how long it keeps the search context alive, for example ?scroll=10m. The default is 5 minutes.

Throttling update requests

To control the rate at which update by query issues batches of update operations, you can set requests_per_second to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set requests_per_second to -1 to turn off throttling.

Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. By default the batch size is 1000, so if requests_per_second is set to 500:

target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
          

Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
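For example, a sketch of a request throttled to 500 sub-requests per second; an in-flight task could later be rethrottled with `update_by_query_rethrottle` (the index name and values are illustrative, and `client` is assumed to be an `AsyncElasticsearch` instance):

    resp = await client.update_by_query(
        index="my-index-000001",
        query={"match_all": {}},
        requests_per_second=500,
        wait_for_completion=False,
    )
    # Remove the throttle from the running task entirely
    await client.update_by_query_rethrottle(
        task_id=resp["task"], requests_per_second=-1
    )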

Slicing

Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.

Setting slices to auto chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.

Adding slices to _update_by_query just automates the manual process of creating sub-requests, which means it has some quirks:

  • You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains the status of completed slices.
  • These sub-requests are individually addressable for things like cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each sub-request.
  • Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being updated.
  • Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.

If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:

  • Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
  • Update performance scales linearly across available resources with the number of slices.

Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. Refer to the linked documentation for examples of how to update documents using the _update_by_query API:

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param conflicts: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to update. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. :param preference: The node or shard the operation should be performed on. It is random by default. :param q: A query in the Lucene query string syntax. :param query: The documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. :param request_cache: If `true`, the request cache is used for this request. It defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. :param routing: A custom value used to route operations to a specific shard. :param script: The script to run to update the document source or metadata when updating. :param scroll: The period to retain the search context for scrolling. :param scroll_size: The size of the scroll request that powers the operation. :param search_timeout: An explicit timeout for each search request. By default, there is no timeout. :param search_type: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. 
:param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. :param sort: A comma-separated list of : pairs. :param stats: The specific `tag` of the request for logging and statistical purposes. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. :param timeout: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. :param version: If `true`, returns the document version as part of a hit. :param version_type: Should the document increment the version number (internal) on hit or not (reindex) :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. :param wait_for_completion: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_update_by_query' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. 
if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if pipeline is not None: __query["pipeline"] = pipeline if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: __query["request_cache"] = request_cache if requests_per_second is not None: __query["requests_per_second"] = requests_per_second if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if scroll_size is not None: __query["scroll_size"] = scroll_size if search_timeout is not None: __query["search_timeout"] = search_timeout if search_type is not None: __query["search_type"] = search_type if slices is not None: __query["slices"] = slices if sort is not None: __query["sort"] = sort if stats is not None: __query["stats"] = stats if terminate_after is not None: __query["terminate_after"] = terminate_after if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if conflicts is not None: __body["conflicts"] = conflicts if max_docs is not None: __body["max_docs"] = max_docs if query is not None: __body["query"] = query if script is not None: __body["script"] = script if slice is not None: __body["slice"] = slice if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="update_by_query", path_parts=__path_parts, ) @_rewrite_parameters() async def update_by_query_rethrottle( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Throttle an update by query operation.

Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.

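For example, a minimal sketch that removes the throttle from a running update-by-query task (``es`` is assumed to be an ``AsyncElasticsearch`` instance and the task ID is a placeholder)::

    await es.update_by_query_rethrottle(
        task_id="oTUltX4IQMOUUVeiohTt8A:124",
        requests_per_second=-1,
    )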
``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_update_by_query/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if requests_per_second is not None: __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="update_by_query_rethrottle", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/_base.py000066400000000000000000000362061506101734100246470ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import re import warnings from typing import ( Any, Callable, Collection, Dict, Iterable, List, Mapping, Optional, Tuple, Union, ) from elastic_transport import ( ApiResponse, AsyncTransport, BinaryApiResponse, HeadApiResponse, HttpHeaders, ListApiResponse, NodeConfig, ObjectApiResponse, OpenTelemetrySpan, SniffOptions, TextApiResponse, ) from elastic_transport.client_utils import DEFAULT, DefaultType from ..._otel import OpenTelemetry from ..._version import __versionstr__ from ...compat import warn_stacklevel from ...exceptions import ( HTTP_EXCEPTIONS, ApiError, ConnectionError, ElasticsearchWarning, SerializationError, UnsupportedProductError, ) from .utils import _TYPE_ASYNC_SNIFF_CALLBACK, _base64_auth_header, _quote_query _WARNING_RE = re.compile(r"\"([^\"]*)\"") _COMPAT_MIMETYPE_TEMPLATE = "application/vnd.elasticsearch+%s; compatible-with=" + str( __versionstr__.partition(".")[0] ) _COMPAT_MIMETYPE_RE = re.compile(r"application/(json|x-ndjson|vnd\.mapbox-vector-tile)") _COMPAT_MIMETYPE_SUB = _COMPAT_MIMETYPE_TEMPLATE % (r"\g<1>",) def resolve_auth_headers( headers: Optional[Mapping[str, str]], http_auth: Union[DefaultType, None, Tuple[str, str], str] = DEFAULT, api_key: Union[DefaultType, None, Tuple[str, str], str] = DEFAULT, basic_auth: Union[DefaultType, None, Tuple[str, str], str] = DEFAULT, bearer_auth: Union[DefaultType, None, str] = DEFAULT, ) -> HttpHeaders: if headers is None: headers = HttpHeaders() elif not isinstance(headers, HttpHeaders): headers = HttpHeaders(headers) resolved_http_auth = http_auth if http_auth is not DEFAULT else None resolved_basic_auth = basic_auth if basic_auth is not DEFAULT else None if resolved_http_auth is not None: if resolved_basic_auth is not None: raise ValueError( "Can't specify both 'http_auth' and 'basic_auth', " "instead only specify 'basic_auth'" ) if isinstance(http_auth, str) or ( isinstance(resolved_http_auth, (list, tuple)) and all(isinstance(x, str) for x in resolved_http_auth) ): resolved_basic_auth = resolved_http_auth else: raise TypeError( "The deprecated 'http_auth' parameter must be either 'Tuple[str, str]' or 'str'. " "Use either the 'basic_auth' parameter instead" ) warnings.warn( "The 'http_auth' parameter is deprecated. 
" "Use 'basic_auth' or 'bearer_auth' parameters instead", category=DeprecationWarning, stacklevel=warn_stacklevel(), ) resolved_api_key = api_key if api_key is not DEFAULT else None resolved_bearer_auth = bearer_auth if bearer_auth is not DEFAULT else None if resolved_api_key or resolved_basic_auth or resolved_bearer_auth: if ( sum( x is not None for x in ( resolved_api_key, resolved_basic_auth, resolved_bearer_auth, ) ) > 1 ): raise ValueError( "Can only set one of 'api_key', 'basic_auth', and 'bearer_auth'" ) if headers and headers.get("authorization", None) is not None: raise ValueError( "Can't set 'Authorization' HTTP header with other authentication options" ) if resolved_api_key: headers["authorization"] = f"ApiKey {_base64_auth_header(resolved_api_key)}" if resolved_basic_auth: headers["authorization"] = ( f"Basic {_base64_auth_header(resolved_basic_auth)}" ) if resolved_bearer_auth: headers["authorization"] = f"Bearer {resolved_bearer_auth}" return headers def create_sniff_callback( host_info_callback: Optional[ Callable[[Dict[str, Any], Dict[str, Any]], Optional[Dict[str, Any]]] ] = None, sniffed_node_callback: Optional[ Callable[[Dict[str, Any], NodeConfig], Optional[NodeConfig]] ] = None, ) -> _TYPE_ASYNC_SNIFF_CALLBACK: assert (host_info_callback is None) != (sniffed_node_callback is None) # Wrap the deprecated 'host_info_callback' into 'sniffed_node_callback' if host_info_callback is not None: def _sniffed_node_callback( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: assert host_info_callback is not None if ( host_info_callback( # type ignore[misc] node_info, {"host": node_config.host, "port": node_config.port} ) is None ): return None return node_config sniffed_node_callback = _sniffed_node_callback async def sniff_callback( transport: AsyncTransport, sniff_options: SniffOptions ) -> List[NodeConfig]: for _ in transport.node_pool.all(): try: meta, node_infos = await transport.perform_request( "GET", "/_nodes/_all/http", headers={ "accept": "application/vnd.elasticsearch+json; compatible-with=9" }, request_timeout=( sniff_options.sniff_timeout if not sniff_options.is_initial_sniff else None ), ) except (SerializationError, ConnectionError): continue if not 200 <= meta.status <= 299: continue node_configs = [] for node_info in node_infos.get("nodes", {}).values(): address = node_info.get("http", {}).get("publish_address") if not address or ":" not in address: continue if "/" in address: # Support 7.x host/ip:port behavior where http.publish_host has been set. fqdn, ipaddress = address.split("/", 1) host = fqdn _, port_str = ipaddress.rsplit(":", 1) port = int(port_str) else: host, port_str = address.rsplit(":", 1) port = int(port_str) assert sniffed_node_callback is not None sniffed_node = sniffed_node_callback( node_info, meta.node.replace(host=host, port=port) ) if sniffed_node is None: continue # Use the node which was able to make the request as a base. 
node_configs.append(sniffed_node) if node_configs: return node_configs return [] return sniff_callback def _default_sniffed_node_callback( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: if node_info.get("roles", []) == ["master"]: return None return node_config default_sniff_callback = create_sniff_callback( sniffed_node_callback=_default_sniffed_node_callback ) class BaseClient: def __init__(self, _transport: AsyncTransport) -> None: self._transport = _transport self._client_meta: Union[DefaultType, Tuple[Tuple[str, str], ...]] = DEFAULT self._headers = HttpHeaders() self._request_timeout: Union[DefaultType, Optional[float]] = DEFAULT self._ignore_status: Union[DefaultType, Collection[int]] = DEFAULT self._max_retries: Union[DefaultType, int] = DEFAULT self._retry_on_timeout: Union[DefaultType, bool] = DEFAULT self._retry_on_status: Union[DefaultType, Collection[int]] = DEFAULT self._verified_elasticsearch = False self._otel = OpenTelemetry() @property def transport(self) -> AsyncTransport: return self._transport async def perform_request( self, method: str, path: str, *, params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, endpoint_id: Optional[str] = None, path_parts: Optional[Mapping[str, Any]] = None, ) -> ApiResponse[Any]: with self._otel.span( method, endpoint_id=endpoint_id, path_parts=path_parts or {}, ) as otel_span: response = await self._perform_request( method, path, params=params, headers=headers, body=body, otel_span=otel_span, ) otel_span.set_elastic_cloud_metadata(response.meta.headers) return response async def _perform_request( self, method: str, path: str, *, params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, otel_span: OpenTelemetrySpan, ) -> ApiResponse[Any]: if headers: request_headers = self._headers.copy() request_headers.update(headers) else: request_headers = self._headers def mimetype_header_to_compat(header: str) -> None: # Converts all parts of a Accept/Content-Type headers # from application/X -> application/vnd.elasticsearch+X mimetype = request_headers.get(header, None) if mimetype: request_headers[header] = _COMPAT_MIMETYPE_RE.sub( _COMPAT_MIMETYPE_SUB, mimetype ) mimetype_header_to_compat("Accept") mimetype_header_to_compat("Content-Type") if params: target = f"{path}?{_quote_query(params)}" else: target = path meta, resp_body = await self.transport.perform_request( method, target, headers=request_headers, body=body, request_timeout=self._request_timeout, max_retries=self._max_retries, retry_on_status=self._retry_on_status, retry_on_timeout=self._retry_on_timeout, client_meta=self._client_meta, otel_span=otel_span, ) # HEAD with a 404 is returned as a normal response # since this is used as an 'exists' functionality. if not (method == "HEAD" and meta.status == 404) and ( not 200 <= meta.status < 299 and ( self._ignore_status is DEFAULT or self._ignore_status is None or meta.status not in self._ignore_status ) ): message = str(resp_body) # If the response is an error response try parsing # the raw Elasticsearch error before raising. 
if isinstance(resp_body, dict): try: error = resp_body.get("error", message) if isinstance(error, dict) and "type" in error: error = error["type"] message = error except (ValueError, KeyError, TypeError): pass raise HTTP_EXCEPTIONS.get(meta.status, ApiError)( message=message, meta=meta, body=resp_body ) # 'X-Elastic-Product: Elasticsearch' should be on every 2XX response. if not self._verified_elasticsearch: # If the header is set we mark the server as verified. if meta.headers.get("x-elastic-product", "") == "Elasticsearch": self._verified_elasticsearch = True # Otherwise we only raise an error on 2XX responses. elif meta.status >= 200 and meta.status < 300: raise UnsupportedProductError( message=( "The client noticed that the server is not Elasticsearch " "and we do not support this unknown product" ), meta=meta, body=resp_body, ) # 'Warning' headers should be reraised as 'ElasticsearchWarning' if "warning" in meta.headers: warning_header = (meta.headers.get("warning") or "").strip() warning_messages: Iterable[str] = _WARNING_RE.findall(warning_header) or ( warning_header, ) stacklevel = warn_stacklevel() for warning_message in warning_messages: warnings.warn( warning_message, category=ElasticsearchWarning, stacklevel=stacklevel, ) if method == "HEAD": response = HeadApiResponse(meta=meta) elif isinstance(resp_body, dict): response = ObjectApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] elif isinstance(resp_body, list): response = ListApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] elif isinstance(resp_body, str): response = TextApiResponse( # type: ignore[assignment] body=resp_body, meta=meta, ) elif isinstance(resp_body, bytes): response = BinaryApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] else: response = ApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] return response class NamespacedClient(BaseClient): def __init__(self, client: "BaseClient") -> None: self._client = client super().__init__(self._client.transport) async def perform_request( self, method: str, path: str, *, params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, endpoint_id: Optional[str] = None, path_parts: Optional[Mapping[str, Any]] = None, ) -> ApiResponse[Any]: # Use the internal clients .perform_request() implementation # so we take advantage of their transport options. return await self._client.perform_request( method, path, params=params, headers=headers, body=body, endpoint_id=endpoint_id, path_parts=path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/async_search.py000066400000000000000000000735741506101734100262510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
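# --- Editor's example (illustrative sketch, not part of this file) ----------
# Shows how the authentication options handled by resolve_auth_headers() in
# _base.py above resolve into an 'Authorization' header. It assumes that
# _base64_auth_header() passes pre-encoded string credentials through as-is.
if __name__ == "__main__":  # pragma: no cover
    from elasticsearch._async.client._base import resolve_auth_headers

    headers = resolve_auth_headers(None, api_key="bXktYXBpLWtleQ==")
    assert headers["authorization"] == "ApiKey bXktYXBpLWtleQ=="
    print(headers["authorization"])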
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class AsyncSearchClient(NamespacedClient): @_rewrite_parameters() async def delete( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async search.

If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to:

  • The authenticated user that submitted the original search request.
  • Users that have the cancel_task cluster privilege.

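For example, a minimal sketch (``es`` is assumed to be an ``AsyncElasticsearch`` instance and the search identifier is a placeholder)::

    await es.async_search.delete(id="my-async-search-id")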
``_ :param id: A unique identifier for the async search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_async_search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="async_search.delete", path_parts=__path_parts, ) @_rewrite_parameters() async def get( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, typed_keys: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async search results.

Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.

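For example, a minimal sketch that polls a previously submitted search, waiting up to two seconds for it to finish (``es`` and the identifier are assumed; the response fields shown are illustrative)::

    resp = await es.async_search.get(
        id="my-async-search-id",
        wait_for_completion_timeout="2s",
        keep_alive="5m",
    )
    if not resp["is_running"]:
        print(resp["response"]["hits"]["total"])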
``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. :param typed_keys: Specify whether aggregation and suggester names should be prefixed by their respective types in the response :param wait_for_completion_timeout: Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_async_search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="async_search.get", path_parts=__path_parts, ) @_rewrite_parameters() async def status( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the async search status.

Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to:

  • The user or API key that submitted the original async search request.
  • Users that have the monitor cluster privilege or greater privileges.
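
For example, a minimal sketch (``es`` and the identifier are assumed; the response fields shown are illustrative)::

    resp = await es.async_search.status(id="my-async-search-id")
    print(resp["is_running"], resp.get("completion_status"))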
``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_async_search/status/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="async_search.status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "collapse", "docvalue_fields", "explain", "ext", "fields", "from_", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rescore", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", "from": "from_", }, ) async def submit( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, batched_reduce_size: t.Optional[int] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, collapse: t.Optional[t.Mapping[str, t.Any]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, ext: t.Optional[t.Mapping[str, t.Any]] = None, fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, highlight: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, q: t.Optional[str] = 
None, query: t.Optional[t.Mapping[str, t.Any]] = None, request_cache: t.Optional[bool] = None, rescore: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, seq_no_primary_term: t.Optional[bool] = None, size: t.Optional[int] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stats: t.Optional[t.Sequence[str]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, suggest: t.Optional[t.Mapping[str, t.Any]] = None, suggest_field: t.Optional[str] = None, suggest_mode: t.Optional[ t.Union[str, t.Literal["always", "missing", "popular"]] ] = None, suggest_size: t.Optional[int] = None, suggest_text: t.Optional[str] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[str] = None, track_scores: t.Optional[bool] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, typed_keys: t.Optional[bool] = None, version: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an async search.

When the primary sort of the results is an indexed field, shards get sorted based on the minimum and maximum values that they hold for that field. Partial results become available following the sort criteria that were requested.

Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.

By default, Elasticsearch does not allow you to store an async search response larger than 10MB, and an attempt to do this results in an error. The maximum allowed size for a stored async search response can be set by changing the search.max_async_search_response_size cluster-level setting.

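For example, a minimal sketch that submits a search and keeps the results for later retrieval (``es`` and the index name are assumed)::

    resp = await es.async_search.submit(
        index="my-index",
        query={"match": {"message": "error"}},
        wait_for_completion_timeout="1s",
        keep_on_completion=True,
    )
    search_id = resp.get("id")  # absent if the search completed within the timeout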
``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices :param aggregations: :param aggs: :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param allow_partial_search_results: Indicate if an error should be returned if there is a partial search failure or timeout :param analyze_wildcard: Specify whether wildcard and prefix queries should be analyzed (default: false) :param analyzer: The analyzer to use for the query string :param batched_reduce_size: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). :param ccs_minimize_roundtrips: The default value is the only supported value. :param collapse: :param default_operator: The default operator for query string query (AND or OR) :param df: The field to use as default where no field prefix is given in the query string :param docvalue_fields: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param explain: If true, returns detailed information about score computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. :param fields: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param highlight: :param ignore_throttled: Whether specified concrete, expanded or aliased indices should be ignored when throttled :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. :param keep_alive: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. :param lenient: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored :param max_concurrent_shard_requests: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests :param min_score: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. 
:param post_filter: :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: :param q: Query in the Lucene query string syntax :param query: Defines the search definition using the Query DSL. :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: :param rest_total_hits_as_int: Indicates whether hits.total should be rendered as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. :param size: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param slice: :param sort: :param source: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. :param source_excludes: A list of fields to exclude from the returned _source field :param source_includes: A list of fields to extract and return from the _source field :param stats: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. :param stored_fields: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. :param suggest: :param suggest_field: Specifies which field to use for suggestions. :param suggest_mode: Specify suggest mode :param suggest_size: How many suggestions to return in response :param suggest_text: The source text for which the suggestions should be returned. :param terminate_after: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. :param timeout: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param track_scores: If true, calculate and return document scores, even if the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. :param typed_keys: Specify whether aggregation and suggester names should be prefixed by their respective types in the response :param version: If true, returns document version as part of a hit. :param wait_for_completion_timeout: Blocks and waits until the search is completed up to a certain timeout. 
When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_async_search' else: __path_parts = {} __path = "/_async_search" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if batched_reduce_size is not None: __query["batched_reduce_size"] = batched_reduce_size if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if keep_alive is not None: __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if request_cache is not None: __query["request_cache"] = request_cache if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if suggest_field is not None: __query["suggest_field"] = suggest_field if suggest_mode is not None: __query["suggest_mode"] = suggest_mode if suggest_size is not None: __query["suggest_size"] = suggest_size if suggest_text is not None: __query["suggest_text"] = suggest_text if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if collapse is not None: __body["collapse"] = collapse if docvalue_fields is not None: __body["docvalue_fields"] = docvalue_fields if explain is not None: __body["explain"] = explain if ext is not None: __body["ext"] = ext if fields is not None: __body["fields"] = fields if from_ is 
not None: __body["from"] = from_ if highlight is not None: __body["highlight"] = highlight if indices_boost is not None: __body["indices_boost"] = indices_boost if knn is not None: __body["knn"] = knn if min_score is not None: __body["min_score"] = min_score if pit is not None: __body["pit"] = pit if post_filter is not None: __body["post_filter"] = post_filter if profile is not None: __body["profile"] = profile if query is not None: __body["query"] = query if rescore is not None: __body["rescore"] = rescore if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if search_after is not None: __body["search_after"] = search_after if seq_no_primary_term is not None: __body["seq_no_primary_term"] = seq_no_primary_term if size is not None: __body["size"] = size if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort if source is not None: __body["_source"] = source if stats is not None: __body["stats"] = stats if stored_fields is not None: __body["stored_fields"] = stored_fields if suggest is not None: __body["suggest"] = suggest if terminate_after is not None: __body["terminate_after"] = terminate_after if timeout is not None: __body["timeout"] = timeout if track_scores is not None: __body["track_scores"] = track_scores if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="async_search.submit", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/autoscaling.py000066400000000000000000000257771506101734100261220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class AutoscalingClient(NamespacedClient): @_rewrite_parameters() async def delete_autoscaling_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an autoscaling policy.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

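For example, a minimal sketch (``es`` is assumed to be an ``AsyncElasticsearch`` instance; the policy name is a placeholder)::

    await es.autoscaling.delete_autoscaling_policy(name="my_autoscaling_policy")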
``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_autoscaling/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="autoscaling.delete_autoscaling_policy", path_parts=__path_parts, ) @_rewrite_parameters() async def get_autoscaling_capacity( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the autoscaling capacity.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

This API gets the current autoscaling capacity based on the configured autoscaling policy. It returns information that you can use to size the cluster appropriately for the current workload.

The required_capacity is calculated as the maximum of the required_capacity results of all individual deciders that are enabled for the policy.

The operator should verify that the current_nodes match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information.

The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions.

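For example, a minimal sketch (``es`` is assumed; the response layout shown is illustrative)::

    resp = await es.autoscaling.get_autoscaling_capacity()
    for name, decision in resp["policies"].items():
        print(name, decision["required_capacity"])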
``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="autoscaling.get_autoscaling_capacity", path_parts=__path_parts, ) @_rewrite_parameters() async def get_autoscaling_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get an autoscaling policy.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

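For example, a minimal sketch (``es`` and the policy name are assumed)::

    policy = await es.autoscaling.get_autoscaling_policy(name="my_autoscaling_policy")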
``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_autoscaling/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="autoscaling.get_autoscaling_policy", path_parts=__path_parts, ) @_rewrite_parameters( body_name="policy", ) async def put_autoscaling_policy( self, *, name: str, policy: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an autoscaling policy.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

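For example, a minimal sketch that creates a policy with a fixed decider (``es`` is assumed; the role and decider shown are illustrative)::

    await es.autoscaling.put_autoscaling_policy(
        name="my_autoscaling_policy",
        policy={"roles": ["data_hot"], "deciders": {"fixed": {}}},
    )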
``_ :param name: the name of the autoscaling policy :param policy: :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if policy is None and body is None: raise ValueError( "Empty value passed for parameters 'policy' and 'body', one of them should be set." ) elif policy is not None and body is not None: raise ValueError("Cannot set both 'policy' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_autoscaling/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="autoscaling.put_autoscaling_policy", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/cat.py000066400000000000000000005335651506101734100243570ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class CatClient(NamespacedClient): @_rewrite_parameters() async def aliases( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "alias", "filter", "index", "is_write_index", "routing.index", "routing.search", ], ] ], t.Union[ str, t.Literal[ "alias", "filter", "index", "is_write_index", "routing.index", "routing.search", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get aliases.

Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.

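For example, a minimal sketch that prints the aliases table (``es`` is assumed to be an ``AsyncElasticsearch`` instance)::

    print(await es.cat.aliases(v=True))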
``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicated that the request should never timeout, you can set it to `-1`. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_cat/aliases/{__path_parts["name"]}' else: __path_parts = {} __path = "/_cat/aliases" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.aliases", path_parts=__path_parts, ) @_rewrite_parameters() async def allocation( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "disk.avail", "disk.indices", "disk.indices.forecast", "disk.percent", "disk.total", "disk.used", "host", "ip", "node", "node.role", "shards", "shards.undesired", "write_load.forecast", ], ] ], t.Union[ str, t.Literal[ "disk.avail", "disk.indices", "disk.indices.forecast", "disk.percent", "disk.total", "disk.used", "host", "ip", "node", "node.role", "shards", "shards.undesired", "write_load.forecast", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get shard allocation information.

Get a snapshot of the number of shards allocated to each data node and the amount of disk space they are using.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
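
A minimal usage sketch, assuming a hypothetical `AsyncElasticsearch` client named `es`::

    # Hypothetical client `es`; show per-node allocation with megabyte units and column headers.
    resp = await es.cat.allocation(bytes="mb", v=True)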

``_ :param node_id: A comma-separated list of node identifiers or names used to limit the returned information. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false`, the list of selected nodes is computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_cat/allocation/{__path_parts["node_id"]}' else: __path_parts = {} __path = "/_cat/allocation" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.allocation", path_parts=__path_parts, ) @_rewrite_parameters() async def component_templates( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "alias_count", "included_in", "mapping_count", "metadata_count", "name", "settings_count", "version", ], ] ], t.Union[ str, t.Literal[ "alias_count", "included_in", "mapping_count", "metadata_count", "name", "settings_count", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get component templates.

Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API.
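
For example, a minimal sketch (the client `es` and the template pattern are assumptions for illustration)::

    # Hypothetical client `es`; list component templates matching a made-up wildcard.
    resp = await es.cat.component_templates(name="logs-*", format="json")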

``_ :param name: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false`, the list of selected nodes is computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: The period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_cat/component_templates/{__path_parts["name"]}' else: __path_parts = {} __path = "/_cat/component_templates" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.component_templates", path_parts=__path_parts, ) @_rewrite_parameters() async def count( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["count", "epoch", "timestamp"]]], t.Union[str, t.Literal["count", "epoch", "timestamp"]], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get a document count.

Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents that have not yet been removed by the merge process.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
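
A minimal sketch, assuming an `AsyncElasticsearch` client `es` and an illustrative index name::

    # Hypothetical client `es` and index name; returns the live document count as JSON.
    resp = await es.cat.count(index="my-index", format="json")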

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/count/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/count" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.count", path_parts=__path_parts, ) @_rewrite_parameters() async def fielddata( self, *, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]] ], t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get field data cache information.

Get the amount of heap memory currently used by the field data cache on every data node in the cluster.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API.
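
For instance, a sketch assuming a hypothetical client `es` and an illustrative field name::

    # Hypothetical client `es`; report field data usage for one field in kilobytes.
    resp = await es.cat.fielddata(fields="body", bytes="kb", v=True)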

``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if fields not in SKIP_IN_PATH: __path_parts = {"fields": _quote(fields)} __path = f'/_cat/fielddata/{__path_parts["fields"]}' else: __path_parts = {} __path = "/_cat/fielddata" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.fielddata", path_parts=__path_parts, ) @_rewrite_parameters() async def health( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "active_shards_percent", "cluster", "epoch", "init", "max_task_wait_time", "node.data", "node.total", "pending_tasks", "pri", "relo", "shards", "status", "timestamp", "unassign", "unassign.pri", ], ] ], t.Union[ str, t.Literal[ "active_shards_percent", "cluster", "epoch", "init", "max_task_wait_time", "node.data", "node.total", "pending_tasks", "pri", "relo", "shards", "status", "timestamp", "unassign", "unassign.pri", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, ts: t.Optional[bool] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get the cluster health status.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API.

This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: HH:MM:SS, which is human-readable but includes no date information, and Unix epoch time, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days.

You can use the CAT health API to verify cluster health across multiple nodes. You can also use the API to track the recovery of a large cluster over a longer period of time.
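
A minimal sketch (the `AsyncElasticsearch` client `es` is an assumed, pre-configured instance)::

    # Hypothetical client `es`; machine-readable health snapshot of the cluster.
    resp = await es.cat.health(format="json")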

``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param ts: If true, returns `HH:MM:SS` and Unix epoch timestamps. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/health" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if ts is not None: __query["ts"] = ts if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.health", path_parts=__path_parts, ) @_rewrite_parameters() async def help(self) -> TextApiResponse: """ .. raw:: html

Get CAT help.

Get help for the CAT APIs.
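
For example, assuming a hypothetical `AsyncElasticsearch` client `es`::

    # Hypothetical client `es`; prints the plain-text list of available CAT endpoints.
    print(await es.cat.help())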

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} __headers = {"accept": "text/plain"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.help", path_parts=__path_parts, ) @_rewrite_parameters() async def indices( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "bulk.avg_size_in_bytes", "bulk.avg_time", "bulk.total_operations", "bulk.total_size_in_bytes", "bulk.total_time", "completion.size", "creation.date", "creation.date.string", "dataset.size", "dense_vector.value_count", "docs.count", "docs.deleted", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "health", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "memory.total", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "pri", "pri.bulk.avg_size_in_bytes", "pri.bulk.avg_time", "pri.bulk.total_operations", "pri.bulk.total_size_in_bytes", "pri.bulk.total_time", "pri.completion.size", "pri.dense_vector.value_count", "pri.fielddata.evictions", "pri.fielddata.memory_size", "pri.flush.total", "pri.flush.total_time", "pri.get.current", "pri.get.exists_time", "pri.get.exists_total", "pri.get.missing_time", "pri.get.missing_total", "pri.get.time", "pri.get.total", "pri.indexing.delete_current", "pri.indexing.delete_time", "pri.indexing.delete_total", "pri.indexing.index_current", "pri.indexing.index_failed", "pri.indexing.index_failed_due_to_version_conflict", "pri.indexing.index_time", "pri.indexing.index_total", "pri.memory.total", "pri.merges.current", "pri.merges.current_docs", "pri.merges.current_size", "pri.merges.total", "pri.merges.total_docs", "pri.merges.total_size", "pri.merges.total_time", "pri.query_cache.evictions", "pri.query_cache.memory_size", "pri.refresh.external_time", "pri.refresh.external_total", "pri.refresh.listeners", "pri.refresh.time", "pri.refresh.total", "pri.request_cache.evictions", "pri.request_cache.hit_count", "pri.request_cache.memory_size", "pri.request_cache.miss_count", "pri.search.fetch_current", "pri.search.fetch_time", "pri.search.fetch_total", "pri.search.open_contexts", "pri.search.query_current", "pri.search.query_time", "pri.search.query_total", "pri.search.scroll_current", "pri.search.scroll_time", "pri.search.scroll_total", "pri.segments.count", "pri.segments.fixed_bitset_memory", "pri.segments.index_writer_memory", "pri.segments.memory", "pri.segments.version_map_memory", "pri.sparse_vector.value_count", "pri.store.size", "pri.suggest.current", "pri.suggest.time", "pri.suggest.total", "pri.warmer.current", "pri.warmer.total", "pri.warmer.total_time", 
"query_cache.evictions", "query_cache.memory_size", "refresh.external_time", "refresh.external_total", "refresh.listeners", "refresh.time", "refresh.total", "rep", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "sparse_vector.value_count", "status", "store.size", "suggest.current", "suggest.time", "suggest.total", "uuid", "warmer.current", "warmer.total", "warmer.total_time", ], ] ], t.Union[ str, t.Literal[ "bulk.avg_size_in_bytes", "bulk.avg_time", "bulk.total_operations", "bulk.total_size_in_bytes", "bulk.total_time", "completion.size", "creation.date", "creation.date.string", "dataset.size", "dense_vector.value_count", "docs.count", "docs.deleted", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "health", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "memory.total", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "pri", "pri.bulk.avg_size_in_bytes", "pri.bulk.avg_time", "pri.bulk.total_operations", "pri.bulk.total_size_in_bytes", "pri.bulk.total_time", "pri.completion.size", "pri.dense_vector.value_count", "pri.fielddata.evictions", "pri.fielddata.memory_size", "pri.flush.total", "pri.flush.total_time", "pri.get.current", "pri.get.exists_time", "pri.get.exists_total", "pri.get.missing_time", "pri.get.missing_total", "pri.get.time", "pri.get.total", "pri.indexing.delete_current", "pri.indexing.delete_time", "pri.indexing.delete_total", "pri.indexing.index_current", "pri.indexing.index_failed", "pri.indexing.index_failed_due_to_version_conflict", "pri.indexing.index_time", "pri.indexing.index_total", "pri.memory.total", "pri.merges.current", "pri.merges.current_docs", "pri.merges.current_size", "pri.merges.total", "pri.merges.total_docs", "pri.merges.total_size", "pri.merges.total_time", "pri.query_cache.evictions", "pri.query_cache.memory_size", "pri.refresh.external_time", "pri.refresh.external_total", "pri.refresh.listeners", "pri.refresh.time", "pri.refresh.total", "pri.request_cache.evictions", "pri.request_cache.hit_count", "pri.request_cache.memory_size", "pri.request_cache.miss_count", "pri.search.fetch_current", "pri.search.fetch_time", "pri.search.fetch_total", "pri.search.open_contexts", "pri.search.query_current", "pri.search.query_time", "pri.search.query_total", "pri.search.scroll_current", "pri.search.scroll_time", "pri.search.scroll_total", "pri.segments.count", "pri.segments.fixed_bitset_memory", "pri.segments.index_writer_memory", "pri.segments.memory", "pri.segments.version_map_memory", "pri.sparse_vector.value_count", "pri.store.size", "pri.suggest.current", "pri.suggest.time", "pri.suggest.total", "pri.warmer.current", "pri.warmer.total", "pri.warmer.total_time", "query_cache.evictions", "query_cache.memory_size", 
"refresh.external_time", "refresh.external_total", "refresh.listeners", "refresh.time", "refresh.total", "rep", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "sparse_vector.value_count", "status", "store.size", "suggest.current", "suggest.time", "suggest.total", "uuid", "warmer.current", "warmer.total", "warmer.total_time", ], ], ] ] = None, health: t.Optional[ t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, pri: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get index information.

Get high-level information about indices in a cluster, including backing indices for data streams.

Use this request to get the following information for each index in a cluster:

  • shard count
  • document count
  • deleted document count
  • primary store size
  • total store size of all shards, including shard replicas

These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs.

CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint.
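
A minimal sketch, assuming a client `es` and an illustrative index pattern (see the parameters documented below)::

    # Hypothetical client `es`; list matching indices, primaries only, sizes in MB.
    resp = await es.cat.indices(index="my-*", pri=True, bytes="mb", format="json")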

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param bytes: The unit used to display byte values. :param expand_wildcards: The type of index that wildcard patterns can match. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param health: The health status used to limit returned indices. By default, the response includes indices of any health status. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. :param master_timeout: Period to wait for a connection to the master node. :param pri: If true, the response only includes information from primary shards. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/indices/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/indices" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if health is not None: __query["health"] = health if help is not None: __query["help"] = help if human is not None: __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if pri is not None: __query["pri"] = pri if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.indices", path_parts=__path_parts, ) @_rewrite_parameters() async def master( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["host", "id", "ip", "node"]]], t.Union[str, t.Literal["host", "id", "ip", "node"]], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get master node information.

Get information about the master node, including the ID, bound IP address, and name.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
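
For example (the client `es` is an assumed instance)::

    # Hypothetical client `es`; one row describing the elected master node.
    resp = await es.cat.master(format="json")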

``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false`, the list of selected nodes is computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/master" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.master", path_parts=__path_parts, ) @_rewrite_parameters() async def ml_data_frame_analytics( self, *, id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ], ] ] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v:
t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get data frame analytics jobs.

Get configuration and usage information about data frame analytics jobs.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API.
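
A minimal sketch, assuming a hypothetical client `es`::

    # Hypothetical client `es`; avoid a 404 response when no jobs match.
    resp = await es.cat.ml_data_frame_analytics(allow_no_match=True, format="json")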

``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) :param bytes: The unit in which to display byte values :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_cat/ml/data_frame/analytics/{__path_parts["id"]}' else: __path_parts = {} __path = "/_cat/ml/data_frame/analytics" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters() async def ml_datafeeds( self, *, datafeed_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ] ], t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ] ], t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ], ] ] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get datafeeds.

Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API.
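
For instance (the client `es` and the datafeed pattern are illustrative assumptions)::

    # Hypothetical client `es`; list datafeeds matching a made-up wildcard pattern.
    resp = await es.cat.ml_datafeeds(datafeed_id="datafeed-*", format="json")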

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. :param allow_no_match: Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_cat/ml/datafeeds/{__path_parts["datafeed_id"]}' else: __path_parts = {} __path = "/_cat/ml/datafeeds" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_datafeeds", path_parts=__path_parts, ) @_rewrite_parameters() async def ml_jobs( self, *, job_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", "data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", 
"model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", "data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", "model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", "data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", "model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", 
"data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", "model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ], ] ] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get anomaly detection jobs.

Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API.
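
A minimal sketch, assuming a hypothetical client `es` and an illustrative job ID pattern::

    # Hypothetical client `es`; show matching jobs with times displayed in seconds.
    resp = await es.cat.ml_jobs(job_id="my-job*", time="s", format="json")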

``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_cat/ml/anomaly_detectors/{__path_parts["job_id"]}' else: __path_parts = {} __path = "/_cat/ml/anomaly_detectors" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_jobs", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def ml_trained_models( self, *, model_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, from_: t.Optional[int] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ] ], t.Union[ str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ] ], t.Union[ 
str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ], ] ] = None, size: t.Optional[int] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get trained models.

Get configuration and usage information about inference trained models.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API.
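
For example (client `es` assumed; the paging values are illustrative)::

    # Hypothetical client `es`; page through trained models 50 at a time.
    resp = await es.cat.ml_trained_models(from_=0, size=50, format="json")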

``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param from_: Skips the specified number of trained models. :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of trained models to display. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if model_id not in SKIP_IN_PATH: __path_parts = {"model_id": _quote(model_id)} __path = f'/_cat/ml/trained_models/{__path_parts["model_id"]}' else: __path_parts = {} __path = "/_cat/ml/trained_models" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if from_ is not None: __query["from"] = from_ if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_trained_models", path_parts=__path_parts, ) @_rewrite_parameters() async def nodeattrs( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "attr", "host", "id", "ip", "node", "pid", "port", "value" ], ] ], t.Union[ str, t.Literal[ "attr", "host", "id", "ip", "node", "pid", "port", "value" ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get node attribute information.

Get information about custom node attributes.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
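
A minimal sketch, assuming a hypothetical client `es`::

    # Hypothetical client `es`; restrict the output to selected columns.
    resp = await es.cat.nodeattrs(h="node,attr,value", format="json")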

``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of column names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false`, the list of selected nodes is computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/nodeattrs" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.nodeattrs", path_parts=__path_parts, ) @_rewrite_parameters() async def nodes( self, *, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "build", "completion.size", "cpu", "disk.avail", "disk.total", "disk.used", "disk.used_percent", "fielddata.evictions", "fielddata.memory_size", "file_desc.current", "file_desc.max", "file_desc.percent", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "heap.current", "heap.max", "heap.percent", "http_address", "id", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "jdk", "load_15m", "load_1m", "load_5m", "mappings.total_count", "mappings.total_estimated_overhead_in_bytes", "master", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "name", "node.role", "pid", "port", "query_cache.evictions", "query_cache.hit_count", "query_cache.memory_size", "query_cache.miss_count", "ram.current", "ram.max", "ram.percent", "refresh.time", "refresh.total", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "script.cache_evictions", "script.compilations", "search.fetch_current", "search.fetch_time",
"search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "shard_stats.total_count", "suggest.current", "suggest.time", "suggest.total", "uptime", "version", ], ] ], t.Union[ str, t.Literal[ "build", "completion.size", "cpu", "disk.avail", "disk.total", "disk.used", "disk.used_percent", "fielddata.evictions", "fielddata.memory_size", "file_desc.current", "file_desc.max", "file_desc.percent", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "heap.current", "heap.max", "heap.percent", "http_address", "id", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "jdk", "load_15m", "load_1m", "load_5m", "mappings.total_count", "mappings.total_estimated_overhead_in_bytes", "master", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "name", "node.role", "pid", "port", "query_cache.evictions", "query_cache.hit_count", "query_cache.memory_size", "query_cache.miss_count", "ram.current", "ram.max", "ram.percent", "refresh.time", "refresh.total", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "script.cache_evictions", "script.compilations", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "shard_stats.total_count", "suggest.current", "suggest.time", "suggest.total", "uptime", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get node information.

Get information about the nodes in a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
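For illustration, a minimal usage sketch (the endpoint URL is a placeholder; run it from within a coroutine)::

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # hypothetical cluster address
    # Machine-readable output with full node IDs and a header row
    resp = await client.cat.nodes(format="json", full_id=True, v=True)
    print(resp)
    await client.close()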

``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. :param master_timeout: The period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/nodes" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if full_id is not None: __query["full_id"] = full_id if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.nodes", path_parts=__path_parts, ) @_rewrite_parameters() async def pending_tasks( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal["insertOrder", "priority", "source", "timeInQueue"], ] ], t.Union[ str, t.Literal["insertOrder", "priority", "source", "timeInQueue"] ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get pending task information.

Get information about cluster-level changes that have not yet taken effect. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.
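A short sketch, assuming ``client`` is an already-configured ``AsyncElasticsearch`` instance::

    # Select queue-related columns and sort by time spent in the queue
    resp = await client.cat.pending_tasks(
        h="insertOrder,timeInQueue,priority,source", s="timeInQueue:desc", v=True
    )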

``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/pending_tasks" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.pending_tasks", path_parts=__path_parts, ) @_rewrite_parameters() async def plugins( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal["component", "description", "id", "name", "version"], ] ], t.Union[ str, t.Literal["component", "description", "id", "name", "version"] ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get plugin information.

Get a list of plugins running on each node of a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
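For example (``client`` is an assumed ``AsyncElasticsearch`` instance)::

    # Include bootstrap plugins and sort rows by component name
    resp = await client.cat.plugins(include_bootstrap=True, s="component", v=True)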

``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/plugins" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if include_bootstrap is not None: __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.plugins", path_parts=__path_parts, ) @_rewrite_parameters() async def recovery( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, active_only: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "bytes", "bytes_percent", "bytes_recovered", "bytes_total", "files", "files_percent", "files_recovered", "files_total", "index", "repository", "shard", "snapshot", "source_host", "source_node", "stage", "start_time", "start_time_millis", "stop_time", "stop_time_millis", "target_host", "target_node", "time", "translog_ops", "translog_ops_percent", "translog_ops_recovered", "type", ], ] ], t.Union[ str, t.Literal[ "bytes", "bytes_percent", "bytes_recovered", "bytes_total", "files", "files_percent", "files_recovered", "files_total", "index", "repository", "shard", "snapshot", "source_host", "source_node", "stage", "start_time", "start_time_millis", "stop_time", "stop_time_millis", "target_host", "target_node", "time", "translog_ops", "translog_ops_percent", "translog_ops_recovered", "type", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> 
t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get shard recovery information.

Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.
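A hedged sketch (``client`` and the index name are placeholders)::

    # Ongoing recoveries only, with byte columns rendered in megabytes
    resp = await client.cat.recovery(
        index="my-index", active_only=True, detailed=True, bytes="mb"
    )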

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param active_only: If `true`, the response only includes ongoing shard recoveries. :param bytes: The unit used to display byte values. :param detailed: If `true`, the response includes detailed information about shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/recovery/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/recovery" __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only if bytes is not None: __query["bytes"] = bytes if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.recovery", path_parts=__path_parts, ) @_rewrite_parameters() async def repositories( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[t.Union[str, t.Sequence[str]]] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get snapshot repository information.

Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.
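For example (``client`` is an assumed ``AsyncElasticsearch`` instance)::

    resp = await client.cat.repositories(v=True, master_timeout="30s")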

``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/repositories" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.repositories", path_parts=__path_parts, ) @_rewrite_parameters() async def segments( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "committed", "compound", "docs.count", "docs.deleted", "generation", "id", "index", "ip", "prirep", "searchable", "segment", "shard", "size", "size.memory", "version", ], ] ], t.Union[ str, t.Literal[ "committed", "compound", "docs.count", "docs.deleted", "generation", "id", "index", "ip", "prirep", "searchable", "segment", "shard", "size", "size.memory", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get segment information.

Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.
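A minimal sketch (``client`` and ``my-index`` are placeholders)::

    # Largest segments first, with sizes shown in kilobytes
    resp = await client.cat.segments(index="my-index", bytes="kb", s="size:desc")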

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/segments/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/segments" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.segments", path_parts=__path_parts, ) @_rewrite_parameters() async def shards( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "completion.size", "dataset.size", "dense_vector.value_count", "docs", "dsparse_vector.value_count", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "id", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "node", "prirep", "query_cache.evictions", "query_cache.memory_size", "recoverysource.type", "refresh.time", "refresh.total", "search.fetch_current", "search.fetch_time", 
"search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "seq_no.global_checkpoint", "seq_no.local_checkpoint", "seq_no.max", "shard", "state", "store", "suggest.current", "suggest.time", "suggest.total", "sync_id", "unassigned.at", "unassigned.details", "unassigned.for", "unassigned.reason", ], ] ], t.Union[ str, t.Literal[ "completion.size", "dataset.size", "dense_vector.value_count", "docs", "dsparse_vector.value_count", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "id", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "node", "prirep", "query_cache.evictions", "query_cache.memory_size", "recoverysource.type", "refresh.time", "refresh.total", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "seq_no.global_checkpoint", "seq_no.local_checkpoint", "seq_no.max", "shard", "state", "store", "suggest.current", "suggest.time", "suggest.total", "sync_id", "unassigned.at", "unassigned.details", "unassigned.for", "unassigned.reason", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get shard information.

Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
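For example (``client`` and the index pattern are placeholders)::

    # A few commonly useful columns, sorted by shard state
    resp = await client.cat.shards(
        index="my-index-*", h="index,shard,prirep,state,node", s="state"
    )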

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param master_timeout: The period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/shards/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/shards" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.shards", path_parts=__path_parts, ) @_rewrite_parameters() async def snapshots( self, *, repository: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "duration", "end_epoch", "end_time", "failed_shards", "id", "indices", "reason", "repository", "start_epoch", "start_time", "status", "successful_shards", "total_shards", ], ] ], t.Union[ str, t.Literal[ "duration", "end_epoch", "end_time", "failed_shards", "id", "indices", "reason", "repository", "start_epoch", "start_time", "status", "successful_shards", "total_shards", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get snapshot information.

Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.
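A short sketch (``client`` and the repository name are placeholders)::

    # Most recent snapshots first
    resp = await client.cat.snapshots(
        repository="my-repo", ignore_unavailable=True, s="start_epoch:desc"
    )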

``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information from unavailable snapshots. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if repository not in SKIP_IN_PATH: __path_parts = {"repository": _quote(repository)} __path = f'/_cat/snapshots/{__path_parts["repository"]}' else: __path_parts = {} __path = "/_cat/snapshots" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.snapshots", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def tasks( self, *, actions: t.Optional[t.Sequence[str]] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "action", "id", "ip", "node", "node_id", "parent_task_id", "port", "running_time", "running_time_ns", "start_time", "task_id", "timestamp", "type", "version", "x_opaque_id", ], ] ], t.Union[ str, t.Literal[ "action", "id", "ip", "node", "node_id", "parent_task_id", "port", "running_time", "running_time_ns", "start_time", "task_id", "timestamp", "type", "version", "x_opaque_id", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get task information.

Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.
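Note that this endpoint is marked experimental in this client (see the ``@_stability_warning`` decorator on this method). A minimal sketch, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    resp = await client.cat.tasks(detailed=True, time="ms", v=True)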

``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: Unit used to display time values. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. :param wait_for_completion: If `true`, the request blocks until the task has completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" __query: t.Dict[str, t.Any] = {} if actions is not None: __query["actions"] = actions if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if timeout is not None: __query["timeout"] = timeout if v is not None: __query["v"] = v if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.tasks", path_parts=__path_parts, ) @_rewrite_parameters() async def templates( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "composed_of", "index_patterns", "name", "order", "version" ], ] ], t.Union[ str, t.Literal[ "composed_of", "index_patterns", "name", "order", "version" ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get index template information.

Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.
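For example (``client`` and the template pattern are placeholders)::

    resp = await client.cat.templates(name="my-template-*", h="name,index_patterns,version")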

``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_cat/templates/{__path_parts["name"]}' else: __path_parts = {} __path = "/_cat/templates" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.templates", path_parts=__path_parts, ) @_rewrite_parameters() async def thread_pool( self, *, thread_pool_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "active", "completed", "core", "ephemeral_id", "host", "ip", "keep_alive", "largest", "max", "name", "node_id", "node_name", "pid", "pool_size", "port", "queue", "queue_size", "rejected", "size", "type", ], ] ], t.Union[ str, t.Literal[ "active", "completed", "core", "ephemeral_id", "host", "ip", "keep_alive", "largest", "max", "name", "node_id", "node_name", "pid", "pool_size", "port", "queue", "queue_size", "rejected", "size", "type", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get thread pool statistics.

Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.
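A minimal sketch (``client`` is assumed; ``write`` and ``search`` name built-in thread pools)::

    resp = await client.cat.thread_pool(
        thread_pool_patterns="write,search", h="node_name,name,active,queue,rejected"
    )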

``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: The period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if thread_pool_patterns not in SKIP_IN_PATH: __path_parts = {"thread_pool_patterns": _quote(thread_pool_patterns)} __path = f'/_cat/thread_pool/{__path_parts["thread_pool_patterns"]}' else: __path_parts = {} __path = "/_cat/thread_pool" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.thread_pool", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def transforms( self, *, transform_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, from_: t.Optional[int] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ] ], t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", 
"frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ] ], t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ], ] ] = None, size: t.Optional[int] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get transform information.

Get configuration and usage information about transforms.

cat APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get transform statistics API.
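A short sketch, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    # Page through transforms ten at a time without failing on empty matches
    resp = await client.cat.transforms(from_=0, size=10, allow_no_match=True, v=True)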

``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param from_: Skips the specified number of transforms. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if transform_id not in SKIP_IN_PATH: __path_parts = {"transform_id": _quote(transform_id)} __path = f'/_cat/transforms/{__path_parts["transform_id"]}' else: __path_parts = {} __path = "/_cat/transforms" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if from_ is not None: __query["from"] = from_ if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.transforms", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/ccr.py000066400000000000000000001413431506101734100243440ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class CcrClient(NamespacedClient): @_rewrite_parameters() async def delete_auto_follow_pattern( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete auto-follow patterns.

Delete a collection of cross-cluster replication auto-follow patterns.
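A minimal sketch (``client`` and the pattern name are placeholders)::

    resp = await client.ccr.delete_auto_follow_pattern(name="my-auto-follow-pattern")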

``_ :param name: The auto-follow pattern collection to delete. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ccr.delete_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "leader_index", "remote_cluster", "data_stream_name", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", "max_read_request_size", "max_retry_delay", "max_write_buffer_count", "max_write_buffer_size", "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", "settings", ), ) async def follow( self, *, index: str, leader_index: t.Optional[str] = None, remote_cluster: t.Optional[str] = None, data_stream_name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.
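A hedged sketch (all index and cluster names below are placeholders; ``remote_cluster`` must name a remote cluster already configured on the follower)::

    resp = await client.ccr.follow(
        index="follower-index",
        leader_index="leader-index",
        remote_cluster="remote-cluster-a",
        wait_for_active_shards=1,
    )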

``_ :param index: The name of the follower index. :param leader_index: The name of the index in the leader cluster to follow. :param remote_cluster: The remote cluster containing the leader index. :param data_stream_name: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed. :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: The maximum number of outstanding reads requests from the remote cluster. :param max_outstanding_write_requests: The maximum number of outstanding write requests on the follower. :param max_read_request_operation_count: The maximum number of operations to pull per read from the remote cluster. :param max_read_request_size: The maximum size in bytes of per read of a batch of operations pulled from the remote cluster. :param max_retry_delay: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. :param max_write_buffer_count: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. :param max_write_buffer_size: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. :param max_write_request_operation_count: The maximum number of operations per bulk write request executed on the follower. :param max_write_request_size: The maximum total bytes of operations per bulk write request executed on the follower. :param read_poll_timeout: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. :param settings: Settings to override from the leader index. :param wait_for_active_shards: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if leader_index is None and body is None: raise ValueError("Empty value passed for parameter 'leader_index'") if remote_cluster is None and body is None: raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/follow' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if leader_index is not None: __body["leader_index"] = leader_index if remote_cluster is not None: __body["remote_cluster"] = remote_cluster if data_stream_name is not None: __body["data_stream_name"] = data_stream_name if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: __body["max_outstanding_write_requests"] = ( max_outstanding_write_requests ) if max_read_request_operation_count is not None: __body["max_read_request_operation_count"] = ( max_read_request_operation_count ) if max_read_request_size is not None: __body["max_read_request_size"] = max_read_request_size if max_retry_delay is not None: __body["max_retry_delay"] = max_retry_delay if max_write_buffer_count is not None: __body["max_write_buffer_count"] = max_write_buffer_count if max_write_buffer_size is not None: __body["max_write_buffer_size"] = max_write_buffer_size if max_write_request_operation_count is not None: __body["max_write_request_operation_count"] = ( max_write_request_operation_count ) if max_write_request_size is not None: __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout if settings is not None: __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.follow", path_parts=__path_parts, ) @_rewrite_parameters() async def follow_info( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get follower information.

Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.
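For example (``client`` and the index name are placeholders)::

    resp = await client.ccr.follow_info(index="follower-index")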

``_ :param index: A comma-delimited list of follower index patterns. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/info' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ccr.follow_info", path_parts=__path_parts, ) @_rewrite_parameters() async def follow_stats( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get follower stats.

Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.
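
A short sketch of reading these shard-level stats (hypothetical index name; the response keys reflect the documented stats shape)::

    resp = await client.ccr.follow_stats(index="follower-index")
    for entry in resp["indices"]:
        print(entry["index"], len(entry["shards"]))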

``_ :param index: A comma-delimited list of index patterns. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/stats' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ccr.follow_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "follower_cluster", "follower_index", "follower_index_uuid", "leader_remote_cluster", ), ) async def forget_follower( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, follower_cluster: t.Optional[str] = None, follower_index: t.Optional[str] = None, follower_index_uuid: t.Optional[str] = None, human: t.Optional[bool] = None, leader_remote_cluster: t.Optional[str] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Forget a follower. Remove the cross-cluster replication follower retention leases from the leader.

A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need in order to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so.

NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.
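
A hedged sketch of such a manual cleanup (every name and the UUID below are hypothetical placeholders)::

    await client.ccr.forget_follower(
        index="leader-index",
        follower_cluster="follower-cluster",
        follower_index="follower-index",
        follower_index_uuid="vYpnaWPRQB6mNspmoCeYyA",
        leader_remote_cluster="leader-cluster",
    )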

``_ :param index: the name of the leader index for which specified follower retention leases should be removed :param follower_cluster: :param follower_index: :param follower_index_uuid: :param leader_remote_cluster: :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/forget_follower' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if follower_cluster is not None: __body["follower_cluster"] = follower_cluster if follower_index is not None: __body["follower_index"] = follower_index if follower_index_uuid is not None: __body["follower_index_uuid"] = follower_index_uuid if leader_remote_cluster is not None: __body["leader_remote_cluster"] = leader_remote_cluster __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.forget_follower", path_parts=__path_parts, ) @_rewrite_parameters() async def get_auto_follow_pattern( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get auto-follow patterns.

Get cross-cluster replication auto-follow patterns.
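
For example (``my-pattern`` is a hypothetical collection name)::

    resp = await client.ccr.get_auto_follow_pattern(name="my-pattern")
    # Omit ``name`` to retrieve every auto-follow pattern collection.
    all_patterns = await client.ccr.get_auto_follow_pattern()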

``_ :param name: The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}' else: __path_parts = {} __path = "/_ccr/auto_follow" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ccr.get_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters() async def pause_auto_follow_pattern( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Pause an auto-follow pattern.

Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored.

You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.
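
A minimal sketch (hypothetical pattern name)::

    await client.ccr.pause_auto_follow_pattern(name="my-pattern")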

``_ :param name: The name of the auto-follow pattern to pause. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}/pause' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.pause_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters() async def pause_follow( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Pause a follower.

Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task.
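
For example (hypothetical follower index name)::

    await client.ccr.pause_follow(index="follower-index")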

``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/pause_follow' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.pause_follow", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "remote_cluster", "follow_index_pattern", "leader_index_exclusion_patterns", "leader_index_patterns", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", "max_read_request_size", "max_retry_delay", "max_write_buffer_count", "max_write_buffer_size", "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", "settings", ), ) async def put_auto_follow_pattern( self, *, name: str, remote_cluster: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, follow_index_pattern: t.Optional[str] = None, human: t.Optional[bool] = None, leader_index_exclusion_patterns: t.Optional[t.Sequence[str]] = None, leader_index_patterns: t.Optional[t.Sequence[str]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.

This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.
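
A sketch of creating a pattern (the cluster alias, pattern name, and index patterns are all hypothetical)::

    await client.ccr.put_auto_follow_pattern(
        name="my-pattern",
        remote_cluster="leader-cluster",
        leader_index_patterns=["logs-*"],
        leader_index_exclusion_patterns=["logs-internal-*"],
        follow_index_pattern="{{leader_index}}-follower",
    )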

``_ :param name: The name of the collection of auto-follow patterns. :param remote_cluster: The remote cluster containing the leader indices to match against. :param follow_index_pattern: The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices. :param leader_index_exclusion_patterns: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed. :param leader_index_patterns: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field. :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: The maximum number of outstanding read requests from the remote cluster. :param max_outstanding_write_requests: The maximum number of outstanding write requests on the follower. :param max_read_request_operation_count: The maximum number of operations to pull per read from the remote cluster. :param max_read_request_size: The maximum size in bytes per read of a batch of operations pulled from the remote cluster. :param max_retry_delay: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying. :param max_write_buffer_count: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit. :param max_write_buffer_size: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit. :param max_write_request_operation_count: The maximum number of operations per bulk write request executed on the follower. :param max_write_request_size: The maximum total bytes of operations per bulk write request executed on the follower. :param read_poll_timeout: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again. :param settings: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards). 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if remote_cluster is None and body is None: raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if remote_cluster is not None: __body["remote_cluster"] = remote_cluster if follow_index_pattern is not None: __body["follow_index_pattern"] = follow_index_pattern if leader_index_exclusion_patterns is not None: __body["leader_index_exclusion_patterns"] = ( leader_index_exclusion_patterns ) if leader_index_patterns is not None: __body["leader_index_patterns"] = leader_index_patterns if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: __body["max_outstanding_write_requests"] = ( max_outstanding_write_requests ) if max_read_request_operation_count is not None: __body["max_read_request_operation_count"] = ( max_read_request_operation_count ) if max_read_request_size is not None: __body["max_read_request_size"] = max_read_request_size if max_retry_delay is not None: __body["max_retry_delay"] = max_retry_delay if max_write_buffer_count is not None: __body["max_write_buffer_count"] = max_write_buffer_count if max_write_buffer_size is not None: __body["max_write_buffer_size"] = max_write_buffer_size if max_write_request_operation_count is not None: __body["max_write_request_operation_count"] = ( max_write_request_operation_count ) if max_write_request_size is not None: __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout if settings is not None: __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.put_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters() async def resume_auto_follow_pattern( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resume an auto-follow pattern.

Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

``_ :param name: The name of the auto-follow pattern to resume. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}/resume' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.resume_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", "max_read_request_size", "max_retry_delay", "max_write_buffer_count", "max_write_buffer_size", "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", ), ) async def resume_follow( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, max_read_request_size: t.Optional[str] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, max_write_buffer_size: t.Optional[str] = None, max_write_request_operation_count: t.Optional[int] = None, max_write_request_size: t.Optional[str] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively, it could have been paused automatically because of failures during following tasks that could not be retried. When this API returns, the follower index will resume fetching operations from the leader index.
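
A minimal sketch (hypothetical index name; the optional limit mirrors the parameters below)::

    await client.ccr.resume_follow(index="follower-index", max_retry_delay="30s")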

``_ :param index: The name of the follow index to resume following. :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: :param max_outstanding_write_requests: :param max_read_request_operation_count: :param max_read_request_size: :param max_retry_delay: :param max_write_buffer_count: :param max_write_buffer_size: :param max_write_request_operation_count: :param max_write_request_size: :param read_poll_timeout: """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/resume_follow' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: __body["max_outstanding_write_requests"] = ( max_outstanding_write_requests ) if max_read_request_operation_count is not None: __body["max_read_request_operation_count"] = ( max_read_request_operation_count ) if max_read_request_size is not None: __body["max_read_request_size"] = max_read_request_size if max_retry_delay is not None: __body["max_retry_delay"] = max_retry_delay if max_write_buffer_count is not None: __body["max_write_buffer_count"] = max_write_buffer_count if max_write_buffer_size is not None: __body["max_write_buffer_size"] = max_write_buffer_size if max_write_request_operation_count is not None: __body["max_write_request_operation_count"] = ( max_write_request_operation_count ) if max_write_request_size is not None: __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.resume_follow", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cross-cluster replication stats.

This API returns stats about auto-following and the same shard-level stats as the get follower stats API.
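
For example (the ``auto_follow_stats`` key reflects the documented response shape)::

    resp = await client.ccr.stats()
    print(resp["auto_follow_stats"])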

``_ :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ccr.stats", path_parts=__path_parts, ) @_rewrite_parameters() async def unfollow( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Unfollow an index.

Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API.

NOTE: Currently, cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.
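
A sketch of the full pause-close-unfollow sequence (hypothetical index name)::

    await client.ccr.pause_follow(index="follower-index")
    await client.indices.close(index="follower-index")
    await client.ccr.unfollow(index="follower-index")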

``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/unfollow' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.unfollow", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/cluster.py000066400000000000000000001716401506101734100252610ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import HeadApiResponse, ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class ClusterClient(NamespacedClient): @_rewrite_parameters( body_fields=("current_node", "index", "primary", "shard"), ) async def allocation_explain( self, *, current_node: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_disk_info: t.Optional[bool] = None, include_yes_decisions: t.Optional[bool] = None, index: t.Optional[str] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, primary: t.Optional[bool] = None, shard: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API.
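
For example, to ask why shard 0 of a hypothetical index is where it is::

    resp = await client.cluster.allocation_explain(
        index="my-index", shard=0, primary=True, include_disk_info=True
    )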

``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. :param include_disk_info: If true, returns information about disk usage and shard sizes. :param include_yes_decisions: If true, returns YES decisions in explanation. :param index: Specifies the name of the index that you would like an explanation for. :param master_timeout: Period to wait for a connection to the master node. :param primary: If true, returns explanation for the primary shard for the given shard ID. :param shard: Specifies the ID of the shard that you would like an explanation for. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/allocation/explain" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_disk_info is not None: __query["include_disk_info"] = include_disk_info if include_yes_decisions is not None: __query["include_yes_decisions"] = include_yes_decisions if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if current_node is not None: __body["current_node"] = current_node if index is not None: __body["index"] = index if primary is not None: __body["primary"] = primary if shard is not None: __body["shard"] = shard if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.allocation_explain", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_component_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="cluster.delete_component_template", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_voting_config_exclusions( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list.

``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/voting_config_exclusions" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_removal is not None: __query["wait_for_removal"] = wait_for_removal __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="cluster.delete_voting_config_exclusions", path_parts=__path_parts, ) @_rewrite_parameters() async def exists_component_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check component templates. Returns information about whether a particular component template exists.
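
A minimal sketch (hypothetical template name; the HEAD response's truthiness is assumed to reflect the HTTP status)::

    exists = await client.cluster.exists_component_template(name="my-template")
    print(bool(exists))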

``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="cluster.exists_component_template", path_parts=__path_parts, ) @_rewrite_parameters() async def get_component_template( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings_filter: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get component templates. Get information about component templates.

``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: Return all default configurations for the component template (default: false) :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings_filter: Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' else: __path_parts = {} __path = "/_component_template" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if settings_filter is not None: __query["settings_filter"] = settings_filter __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.get_component_template", path_parts=__path_parts, ) @_rewrite_parameters() async def get_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster-wide settings.

By default, it returns only settings that have been explicitly defined.
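
For example, to also see defaults, in flat key form::

    resp = await client.cluster.get_settings(include_defaults=True, flat_settings=True)
    print(resp["persistent"], resp["transient"])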

``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, also returns default values for all other cluster settings, reflecting the values in the `elasticsearch.yml` file of one of the nodes in the cluster. If the nodes in your cluster do not all have the same values in their `elasticsearch.yml` config files then the values returned by this API may vary from invocation to invocation and may not reflect the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to fetch the settings for each individual node in your cluster. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.get_settings", path_parts=__path_parts, ) @_rewrite_parameters() async def health( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_events: t.Optional[ t.Union[ str, t.Literal["high", "immediate", "languid", "low", "normal", "urgent"], ] ] = None, wait_for_no_initializing_shards: t.Optional[bool] = None, wait_for_no_relocating_shards: t.Optional[bool] = None, wait_for_nodes: t.Optional[t.Union[int, str]] = None, wait_for_status: t.Optional[ t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the cluster health status.

You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.

The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status.

One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status.
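
A sketch of waiting for at least yellow health (the timeout value is illustrative)::

    resp = await client.cluster.health(wait_for_status="yellow", timeout="30s")
    print(resp["status"], resp["number_of_nodes"])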

``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`. :param expand_wildcards: Whether to expand wildcard expressions to concrete indices that are open, closed or both. :param level: Can be one of cluster, indices or shards. Controls the details level of the health information returned. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: A number controlling how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait. :param wait_for_events: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed. :param wait_for_no_initializing_shards: A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards. :param wait_for_no_relocating_shards: A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards. :param wait_for_nodes: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N. Alternatively, it is possible to use ge(N), le(N), gt(N) and lt(N) notation. :param wait_for_status: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cluster/health/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cluster/health" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if level is not None: __query["level"] = level if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_events is not None: __query["wait_for_events"] = wait_for_events if wait_for_no_initializing_shards is not None: __query["wait_for_no_initializing_shards"] = wait_for_no_initializing_shards if wait_for_no_relocating_shards is not None: __query["wait_for_no_relocating_shards"] = wait_for_no_relocating_shards if wait_for_nodes is not None: __query["wait_for_nodes"] = wait_for_nodes if wait_for_status is not None: __query["wait_for_status"] = wait_for_status __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.health", path_parts=__path_parts, ) @_rewrite_parameters() async def info( self, *, target: t.Union[ t.Sequence[ t.Union[ str, t.Literal["_all", "http", "ingest", "script", "thread_pool"] ] ], t.Union[str, t.Literal["_all", "http", "ingest", "script", "thread_pool"]], ], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster info. Returns basic information about the cluster.

``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. """ if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = {"target": _quote(target)} __path = f'/_info/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.info", path_parts=__path_parts, ) @_rewrite_parameters() async def pending_tasks( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.

NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API, which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both the task management API and the pending cluster tasks API.

``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/pending_tasks" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.pending_tasks", path_parts=__path_parts, ) @_rewrite_parameters() async def post_voting_config_exclusions( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, node_ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, node_names: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes.

Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with DELETE /_cluster/voting_config_exclusions. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use DELETE /_cluster/voting_config_exclusions?wait_for_removal=false to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.

A response to POST /_cluster/voting_config_exclusions with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling DELETE /_cluster/voting_config_exclusions. If the call to POST /_cluster/voting_config_exclusions fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call.

NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.
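
A sketch of the exclusion workflow (hypothetical node names)::

    await client.cluster.post_voting_config_exclusions(node_names="node-1,node-2")
    # ...shut the nodes down, then clear the exclusions:
    await client.cluster.delete_voting_config_exclusions()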

``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. :param node_names: A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. :param timeout: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/voting_config_exclusions" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if node_ids is not None: __query["node_ids"] = node_ids if node_names is not None: __query["node_names"] = node_names if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="cluster.post_voting_config_exclusions", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("template", "deprecated", "meta", "version"), parameter_aliases={"_meta": "meta"}, ) async def put_component_template( self, *, name: str, template: t.Optional[t.Mapping[str, t.Any]] = None, cause: t.Optional[str] = None, create: t.Optional[bool] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s composed_of list. Component templates are only applied to new data streams and indices as part of a matching index template.

Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.

Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices.

You can use C-style /* */ block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket.

Applying component templates

You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's composed_of list.
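
A minimal sketch of creating a settings-only component template (the name and values are hypothetical)::

    await client.cluster.put_component_template(
        name="my-settings",
        template={"settings": {"index.number_of_shards": 1}},
    )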

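A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the template name and settings are illustrative::

    await client.cluster.put_component_template(
        name="my-component-template",
        template={"settings": {"index.number_of_shards": 1}},
    )
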
``_ :param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mapping`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API. :param template: The template to be applied, which includes mappings, settings, or aliases configuration. :param cause: User-defined reason for creating the component template. :param create: If `true`, this request cannot replace or update existing component templates. :param deprecated: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information. :param version: Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if template is None and body is None: raise ValueError("Empty value passed for parameter 'template'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if template is not None: __body["template"] = template if deprecated is not None: __body["deprecated"] = deprecated if meta is not None: __body["_meta"] = meta if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.put_component_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("persistent", "transient"), ) async def put_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, persistent: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, transient: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the cluster settings.

Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut-down node in elasticsearch.yml.

Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value.

If you configure the same setting using multiple methods, Elasticsearch applies the settings in the following order of precedence: 1) Transient setting; 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or elasticsearch.yml setting. However, a change to an elasticsearch.yml setting will not override a defined transient or persistent setting.

TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use elasticsearch.yml for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.

WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.

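A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the setting shown is illustrative::

    # persistent settings survive a full cluster restart
    await client.cluster.put_settings(
        persistent={"indices.recovery.max_bytes_per_sec": "50mb"}
    )
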
``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node :param persistent: The settings that persist after the cluster restarts. :param timeout: Explicit operation timeout :param transient: The settings that do not persist after the cluster restarts. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if persistent is not None: __body["persistent"] = persistent if transient is not None: __body["transient"] = transient __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.put_settings", path_parts=__path_parts, ) @_rewrite_parameters() async def remote_info( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get remote cluster information.

Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias.

INFO: This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.

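A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``::

    resp = await client.cluster.remote_info()
    # the response is keyed by the configured remote cluster aliases
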
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.remote_info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("commands",), ) async def reroute( self, *, commands: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, retry_failed: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.

It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as cluster.routing.rebalance.enable) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.

The cluster can be set to disable allocations using the cluster.routing.allocation.enable setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.

The cluster will attempt to allocate a shard a maximum of index.allocation.max_retries times in a row (defaults to 5) before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as an analyzer that refers to a stopwords file that doesn’t exist on all nodes.

Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the ?retry_failed URI query parameter, which will attempt a single retry round for these shards.

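A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the index and node names are illustrative::

    await client.cluster.reroute(
        dry_run=True,  # simulate the move without applying it
        commands=[
            {
                "move": {
                    "index": "my-index",
                    "shard": 0,
                    "from_node": "node-1",
                    "to_node": "node-2",
                }
            }
        ],
    )
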
``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param metric: Limits the information returned to the specified metrics. :param retry_failed: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/reroute" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if metric is not None: __query["metric"] = metric if pretty is not None: __query["pretty"] = pretty if retry_failed is not None: __query["retry_failed"] = retry_failed if timeout is not None: __query["timeout"] = timeout if not __body: if commands is not None: __body["commands"] = commands if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.reroute", path_parts=__path_parts, ) @_rewrite_parameters() async def state( self, *, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_metadata_version: t.Optional[int] = None, wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the cluster state. Get comprehensive information about the state of the cluster.

The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; the location and status of every shard copy in the cluster.

The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response.

By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the ?local=true query parameter.

Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable.

WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs.

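A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the metric and index name are illustrative::

    # fetch only the metadata portion of the cluster state for one index
    resp = await client.cluster.state(metric="metadata", index="my-index")
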
``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param flat_settings: Return settings in flat format (default: false) :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param local: Return local information, do not retrieve the state from master node (default: false) :param master_timeout: Specify timeout for connection to master :param wait_for_metadata_version: Wait for the metadata version to be equal or greater than the specified metadata version :param wait_for_timeout: The maximum time to wait for wait_for_metadata_version before timing out """ __path_parts: t.Dict[str, str] if metric not in SKIP_IN_PATH and index not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric), "index": _quote(index)} __path = f'/_cluster/state/{__path_parts["metric"]}/{__path_parts["index"]}' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_cluster/state/{__path_parts["metric"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cluster/state/_all/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cluster/state" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_metadata_version is not None: __query["wait_for_metadata_version"] = wait_for_metadata_version if wait_for_timeout is not None: __query["wait_for_timeout"] = wait_for_timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.state", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_remotes: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, OS, JVM versions, memory usage, CPU, and installed plugins).

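A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``::

    resp = await client.cluster.stats()
    print(resp["nodes"]["count"])  # node counts by role
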
``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. :param include_remotes: Include remote cluster data into the response :param timeout: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_cluster/stats/nodes/{__path_parts["node_id"]}' else: __path_parts = {} __path = "/_cluster/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_remotes is not None: __query["include_remotes"] = include_remotes if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.stats", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/connector.py000066400000000000000000002323311506101734100255650ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class ConnectorClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def check_in( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check in a connector.

Update the last_seen field in the connector and set it to the current timestamp.

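A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the connector id is illustrative::

    await client.connector.check_in(connector_id="my-connector")
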
``_ :param connector_id: The unique identifier of the connector to be checked in """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_check_in' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.check_in", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) async def delete( self, *, connector_id: str, delete_sync_jobs: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, hard: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a connector.

Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually.

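A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the connector id is illustrative::

    # also remove the connector's associated sync jobs
    await client.connector.delete(connector_id="my-connector", delete_sync_jobs=True)
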
``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be also removed. Defaults to false. :param hard: A flag indicating if the connector should be hard deleted. """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}' __query: t.Dict[str, t.Any] = {} if delete_sync_jobs is not None: __query["delete_sync_jobs"] = delete_sync_jobs if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if hard is not None: __query["hard"] = hard if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="connector.delete", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) async def get( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_deleted: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a connector.

Get the details about a connector.

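A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the connector id is illustrative::

    resp = await client.connector.get(connector_id="my-connector")
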
``_ :param connector_id: The unique identifier of the connector :param include_deleted: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_deleted is not None: __query["include_deleted"] = include_deleted if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.get", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "last_access_control_sync_error", "last_access_control_sync_scheduled_at", "last_access_control_sync_status", "last_deleted_document_count", "last_incremental_sync_scheduled_at", "last_indexed_document_count", "last_seen", "last_sync_error", "last_sync_scheduled_at", "last_sync_status", "last_synced", "sync_cursor", ), ) @_stability_warning(Stability.EXPERIMENTAL) async def last_sync( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, last_access_control_sync_error: t.Optional[str] = None, last_access_control_sync_scheduled_at: t.Optional[t.Union[str, t.Any]] = None, last_access_control_sync_status: t.Optional[ t.Union[ str, t.Literal[ "canceled", "canceling", "completed", "error", "in_progress", "pending", "suspended", ], ] ] = None, last_deleted_document_count: t.Optional[int] = None, last_incremental_sync_scheduled_at: t.Optional[t.Union[str, t.Any]] = None, last_indexed_document_count: t.Optional[int] = None, last_seen: t.Optional[t.Union[str, t.Any]] = None, last_sync_error: t.Optional[str] = None, last_sync_scheduled_at: t.Optional[t.Union[str, t.Any]] = None, last_sync_status: t.Optional[ t.Union[ str, t.Literal[ "canceled", "canceling", "completed", "error", "in_progress", "pending", "suspended", ], ] ] = None, last_synced: t.Optional[t.Union[str, t.Any]] = None, pretty: t.Optional[bool] = None, sync_cursor: t.Optional[t.Any] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector last sync stats.

Update the fields related to the last sync of a connector. This action is used for analytics and monitoring.

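A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the id and values are illustrative::

    await client.connector.last_sync(
        connector_id="my-connector",
        last_sync_status="completed",
        last_indexed_document_count=1205,
    )
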
``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: :param last_access_control_sync_scheduled_at: :param last_access_control_sync_status: :param last_deleted_document_count: :param last_incremental_sync_scheduled_at: :param last_indexed_document_count: :param last_seen: :param last_sync_error: :param last_sync_scheduled_at: :param last_sync_status: :param last_synced: :param sync_cursor: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_last_sync' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if last_access_control_sync_error is not None: __body["last_access_control_sync_error"] = ( last_access_control_sync_error ) if last_access_control_sync_scheduled_at is not None: __body["last_access_control_sync_scheduled_at"] = ( last_access_control_sync_scheduled_at ) if last_access_control_sync_status is not None: __body["last_access_control_sync_status"] = ( last_access_control_sync_status ) if last_deleted_document_count is not None: __body["last_deleted_document_count"] = last_deleted_document_count if last_incremental_sync_scheduled_at is not None: __body["last_incremental_sync_scheduled_at"] = ( last_incremental_sync_scheduled_at ) if last_indexed_document_count is not None: __body["last_indexed_document_count"] = last_indexed_document_count if last_seen is not None: __body["last_seen"] = last_seen if last_sync_error is not None: __body["last_sync_error"] = last_sync_error if last_sync_scheduled_at is not None: __body["last_sync_scheduled_at"] = last_sync_scheduled_at if last_sync_status is not None: __body["last_sync_status"] = last_sync_status if last_synced is not None: __body["last_synced"] = last_synced if sync_cursor is not None: __body["sync_cursor"] = sync_cursor __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.last_sync", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) @_stability_warning(Stability.BETA) async def list( self, *, connector_name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, include_deleted: t.Optional[bool] = None, index_name: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, service_type: t.Optional[t.Union[str, t.Sequence[str]]] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all connectors.

Get information about all connectors.

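A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the index name is illustrative::

    resp = await client.connector.list(index_name="search-my-index", size=10)
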
``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for :param from_: Starting offset (default: 0) :param include_deleted: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. :param index_name: A comma-separated list of connector index names to fetch connector documents for :param query: A wildcard query string that filters connectors with matching name, description or index name :param service_type: A comma-separated list of connector service types to fetch connector documents for :param size: Specifies a max number of results to get """ __path_parts: t.Dict[str, str] = {} __path = "/_connector" __query: t.Dict[str, t.Any] = {} if connector_name is not None: __query["connector_name"] = connector_name if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if include_deleted is not None: __query["include_deleted"] = include_deleted if index_name is not None: __query["index_name"] = index_name if pretty is not None: __query["pretty"] = pretty if query is not None: __query["query"] = query if service_type is not None: __query["service_type"] = service_type if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.list", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "index_name", "is_native", "language", "name", "service_type", ), ) @_stability_warning(Stability.BETA) async def post( self, *, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_name: t.Optional[str] = None, is_native: t.Optional[bool] = None, language: t.Optional[str] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, service_type: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a connector.

Connectors are Elasticsearch integrations that bring content from third-party data sources, which can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (Native connectors) are a managed service on Elastic Cloud. Self-managed connectors (Connector clients) are self-managed on your infrastructure.

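A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the names and service type are illustrative::

    resp = await client.connector.post(
        index_name="search-my-index",
        name="My connector",
        service_type="google_drive",
    )
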
``_ :param description: :param index_name: :param is_native: :param language: :param name: :param service_type: """ __path_parts: t.Dict[str, str] = {} __path = "/_connector" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if index_name is not None: __body["index_name"] = index_name if is_native is not None: __body["is_native"] = is_native if language is not None: __body["language"] = language if name is not None: __body["name"] = name if service_type is not None: __body["service_type"] = service_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.post", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "index_name", "is_native", "language", "name", "service_type", ), ) @_stability_warning(Stability.BETA) async def put( self, *, connector_id: t.Optional[str] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_name: t.Optional[str] = None, is_native: t.Optional[bool] = None, language: t.Optional[str] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, service_type: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a connector.

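A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the ids and names are illustrative::

    await client.connector.put(
        connector_id="my-connector",
        index_name="search-my-index",
        name="My connector",
    )
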
``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. :param description: :param index_name: :param is_native: :param language: :param name: :param service_type: """ __path_parts: t.Dict[str, str] if connector_id not in SKIP_IN_PATH: __path_parts = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}' else: __path_parts = {} __path = "/_connector" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if index_name is not None: __body["index_name"] = index_name if is_native is not None: __body["is_native"] = is_native if language is not None: __body["language"] = language if name is not None: __body["name"] = name if service_type is not None: __body["service_type"] = service_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.put", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_cancel( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel a connector sync job.

Cancel a connector sync job, which sets the status to cancelling and updates cancellation_requested_at to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled.

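A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the sync job id is illustrative::

    await client.connector.sync_job_cancel(connector_sync_job_id="my-sync-job-id")
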
``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = ( f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_cancel' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_cancel", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def sync_job_check_in( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check in a connector sync job. Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.

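A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the sync job id is illustrative::

    await client.connector.sync_job_check_in(connector_sync_job_id="my-sync-job-id")
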
``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be checked in. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = ( f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_check_in", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("worker_hostname", "sync_cursor"), ) @_stability_warning(Stability.EXPERIMENTAL) async def sync_job_claim( self, *, connector_sync_job_id: str, worker_hostname: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, sync_cursor: t.Optional[t.Any] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Claim a connector sync job. This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time. Additionally, it can set the sync_cursor property for the sync job.

This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.

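A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the id and hostname are illustrative::

    await client.connector.sync_job_claim(
        connector_sync_job_id="my-sync-job-id",
        worker_hostname="connector-worker-1",
    )
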
``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param worker_hostname: The host name of the current system that will run the job. :param sync_cursor: The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") if worker_hostname is None and body is None: raise ValueError("Empty value passed for parameter 'worker_hostname'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if worker_hostname is not None: __body["worker_hostname"] = worker_hostname if sync_cursor is not None: __body["sync_cursor"] = sync_cursor __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_claim", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_delete( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a connector sync job.

Remove a connector sync job and its associated data. This is a destructive action that is not recoverable.

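A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the sync job id is illustrative::

    await client.connector.sync_job_delete(connector_sync_job_id="my-sync-job-id")
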
``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_delete", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("error",), ) @_stability_warning(Stability.EXPERIMENTAL) async def sync_job_error( self, *, connector_sync_job_id: str, error: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set a connector sync job error. Set the error field for a connector sync job and set its status to error.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.

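A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the id and error message are illustrative::

    await client.connector.sync_job_error(
        connector_sync_job_id="my-sync-job-id",
        error="Connection to the data source timed out",
    )
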
``_ :param connector_sync_job_id: The unique identifier for the connector sync job. :param error: The error for the connector sync job error field. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") if error is None and body is None: raise ValueError("Empty value passed for parameter 'error'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if error is not None: __body["error"] = error __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_error", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) async def sync_job_get( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a connector sync job.

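A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the sync job id is illustrative::

    resp = await client.connector.sync_job_get(connector_sync_job_id="my-sync-job-id")
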
``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_get", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) @_stability_warning(Stability.BETA) async def sync_job_list( self, *, connector_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, job_type: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["access_control", "full", "incremental"]] ], t.Union[str, t.Literal["access_control", "full", "incremental"]], ] ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, status: t.Optional[ t.Union[ str, t.Literal[ "canceled", "canceling", "completed", "error", "in_progress", "pending", "suspended", ], ] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all connector sync jobs.

Get information about all stored connector sync jobs listed by their creation date in ascending order.

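A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the connector id is illustrative::

    resp = await client.connector.sync_job_list(
        connector_id="my-connector", status="completed", size=5
    )
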
``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) :param job_type: A comma-separated list of job types to fetch the sync jobs for :param size: Specifies a max number of results to get :param status: A sync job status to fetch connector sync jobs for """ __path_parts: t.Dict[str, str] = {} __path = "/_connector/_sync_job" __query: t.Dict[str, t.Any] = {} if connector_id is not None: __query["connector_id"] = connector_id if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if job_type is not None: __query["job_type"] = job_type if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if status is not None: __query["status"] = status __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_list", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("id", "job_type", "trigger_method"), ) @_stability_warning(Stability.BETA) async def sync_job_post( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_type: t.Optional[ t.Union[str, t.Literal["access_control", "full", "incremental"]] ] = None, pretty: t.Optional[bool] = None, trigger_method: t.Optional[ t.Union[str, t.Literal["on_demand", "scheduled"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a connector sync job.

Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.

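A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the connector id is illustrative::

    resp = await client.connector.sync_job_post(
        id="my-connector", job_type="full", trigger_method="on_demand"
    )
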
``_ :param id: The id of the associated connector :param job_type: :param trigger_method: """ if id is None and body is None: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {} __path = "/_connector/_sync_job" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if id is not None: __body["id"] = id if job_type is not None: __body["job_type"] = job_type if trigger_method is not None: __body["trigger_method"] = trigger_method __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_post", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "deleted_document_count", "indexed_document_count", "indexed_document_volume", "last_seen", "metadata", "total_document_count", ), ) @_stability_warning(Stability.EXPERIMENTAL) async def sync_job_update_stats( self, *, connector_sync_job_id: str, deleted_document_count: t.Optional[int] = None, indexed_document_count: t.Optional[int] = None, indexed_document_volume: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, last_seen: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, total_document_count: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set the connector sync job stats. Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count. You can also update last_seen. This API is mainly used by the connector service for updating sync job information.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.

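A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the id and counts are illustrative::

    await client.connector.sync_job_update_stats(
        connector_sync_job_id="my-sync-job-id",
        deleted_document_count=0,
        indexed_document_count=1205,
        indexed_document_volume=52,  # MiB
    )
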
``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param deleted_document_count: The number of documents the sync job deleted. :param indexed_document_count: The number of documents the sync job indexed. :param indexed_document_volume: The total size of the data (in MiB) the sync job indexed. :param last_seen: The timestamp to use in the `last_seen` property for the connector sync job. :param metadata: The connector-specific metadata. :param total_document_count: The total number of documents in the target index after the sync job finished. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") if deleted_document_count is None and body is None: raise ValueError( "Empty value passed for parameter 'deleted_document_count'" ) if indexed_document_count is None and body is None: raise ValueError( "Empty value passed for parameter 'indexed_document_count'" ) if indexed_document_volume is None and body is None: raise ValueError( "Empty value passed for parameter 'indexed_document_volume'" ) __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_stats' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if deleted_document_count is not None: __body["deleted_document_count"] = deleted_document_count if indexed_document_count is not None: __body["indexed_document_count"] = indexed_document_count if indexed_document_volume is not None: __body["indexed_document_volume"] = indexed_document_volume if last_seen is not None: __body["last_seen"] = last_seen if metadata is not None: __body["metadata"] = metadata if total_document_count is not None: __body["total_document_count"] = total_document_count __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_update_stats", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def update_active_filtering( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Activate the connector draft filter.

Activates the valid draft filtering for a connector.

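A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the connector id is illustrative::

    await client.connector.update_active_filtering(connector_id="my-connector")
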
``_ :param connector_id: The unique identifier of the connector to be updated """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_filtering/_activate' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.update_active_filtering", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("api_key_id", "api_key_secret_id"), ) @_stability_warning(Stability.BETA) async def update_api_key_id( self, *, connector_id: str, api_key_id: t.Optional[str] = None, api_key_secret_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector API key ID.

Update the api_key_id and api_key_secret_id fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field.

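A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the ids are illustrative placeholders::

    await client.connector.update_api_key_id(
        connector_id="my-connector",
        api_key_id="my-api-key-id",
        api_key_secret_id="my-connector-secret-id",
    )
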
``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: :param api_key_secret_id: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_api_key_id' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if api_key_id is not None: __body["api_key_id"] = api_key_id if api_key_secret_id is not None: __body["api_key_secret_id"] = api_key_secret_id __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_api_key_id", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("configuration", "values"), ) @_stability_warning(Stability.BETA) async def update_configuration( self, *, connector_id: str, configuration: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, values: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector configuration.

Update the configuration field in the connector document.

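A minimal sketch (illustrative; the configuration keys are connector-specific placeholders, not a definitive schema)::

    # Send only the changed values; alternatively, the full configuration
    # schema can be replaced by passing the `configuration` parameter.
    resp = await client.connector.update_configuration(
        connector_id="my-connector",
        values={"host": "db.example.com", "port": 5432},  # hypothetical keys
    )
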
``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: :param values: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_configuration' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if configuration is not None: __body["configuration"] = configuration if values is not None: __body["values"] = values __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_configuration", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("error",), ) @_stability_warning(Stability.EXPERIMENTAL) async def update_error( self, *, connector_id: str, error: t.Optional[t.Union[None, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector error field.

Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected.

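A minimal sketch (illustrative; assumes an ``AsyncElasticsearch`` instance named ``client``)::

    # A non-null error moves the connector status to "error".
    await client.connector.update_error(
        connector_id="my-connector",
        error="Source credentials expired",  # hypothetical message
    )
    # Resetting the error to null flips the status back to "connected".
    # Note: the typed `error` parameter rejects a bare None, so send it
    # through the raw request body instead.
    await client.connector.update_error(
        connector_id="my-connector",
        body={"error": None},
    )
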
``_ :param connector_id: The unique identifier of the connector to be updated :param error: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if error is None and body is None: raise ValueError("Empty value passed for parameter 'error'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_error' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if error is not None: __body["error"] = error __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_error", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("features",), ) @_stability_warning(Stability.EXPERIMENTAL) async def update_features( self, *, connector_id: str, features: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector:

  • document-level security
  • incremental syncs
  • advanced sync rules
  • basic sync rules

Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.

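A hedged sketch (the feature keys below follow the connector features document as commonly shown in Elastic's docs, but treat them as assumptions)::

    resp = await client.connector.update_features(
        connector_id="my-connector",
        features={
            "document_level_security": {"enabled": True},
            "incremental_sync": {"enabled": True},
        },
    )
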
``_ :param connector_id: The unique identifier of the connector to be updated. :param features: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if features is None and body is None: raise ValueError("Empty value passed for parameter 'features'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_features' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if features is not None: __body["features"] = features __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_features", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) @_stability_warning(Stability.BETA) async def update_filtering( self, *, connector_id: str, advanced_snippet: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, filtering: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, rules: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector filtering.

Update the draft filtering configuration of a connector and mark the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector.

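A hedged sketch of updating draft sync rules (the rule fields mirror the documented basic sync-rule shape, but treat the payload as an assumption rather than a definitive schema)::

    resp = await client.connector.update_filtering(
        connector_id="my-connector",
        rules=[
            {
                "id": "exclude-tmp",    # hypothetical rule ID
                "field": "path",
                "policy": "exclude",
                "rule": "starts_with",
                "value": "/tmp",
                "order": 0,
            }
        ],
    )
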
``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: :param filtering: :param rules: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_filtering' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if advanced_snippet is not None: __body["advanced_snippet"] = advanced_snippet if filtering is not None: __body["filtering"] = filtering if rules is not None: __body["rules"] = rules __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_filtering", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("validation",), ) @_stability_warning(Stability.EXPERIMENTAL) async def update_filtering_validation( self, *, connector_id: str, validation: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector draft filtering validation.

Update the draft filtering validation info for a connector.

``_ :param connector_id: The unique identifier of the connector to be updated :param validation: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if validation is None and body is None: raise ValueError("Empty value passed for parameter 'validation'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_filtering/_validation' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if validation is not None: __body["validation"] = validation __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_filtering_validation", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("index_name",), ) @_stability_warning(Stability.BETA) async def update_index_name( self, *, connector_id: str, index_name: t.Optional[t.Union[None, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector index name.

Update the index_name field of a connector, specifying the index where the data ingested by the connector is stored.

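A minimal sketch (the index name is a placeholder)::

    resp = await client.connector.update_index_name(
        connector_id="my-connector",
        index_name="search-my-connector",  # hypothetical target index
    )
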
``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if index_name is None and body is None: raise ValueError("Empty value passed for parameter 'index_name'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_index_name' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if index_name is not None: __body["index_name"] = index_name __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_index_name", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "name"), ) @_stability_warning(Stability.BETA) async def update_name( self, *, connector_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector name and description.

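A minimal sketch with illustrative values::

    resp = await client.connector.update_name(
        connector_id="my-connector",
        name="Shared Drive connector",
        description="Syncs the shared drive into Elasticsearch",
    )
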
``_ :param connector_id: The unique identifier of the connector to be updated :param description: :param name: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_name' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if name is not None: __body["name"] = name __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_name", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("is_native",), ) @_stability_warning(Stability.BETA) async def update_native( self, *, connector_id: str, is_native: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector is_native flag.

``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if is_native is None and body is None: raise ValueError("Empty value passed for parameter 'is_native'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_native' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if is_native is not None: __body["is_native"] = is_native __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_native", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("pipeline",), ) @_stability_warning(Stability.BETA) async def update_pipeline( self, *, connector_id: str, pipeline: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector pipeline.

When you create a new connector, the configuration of an ingest pipeline is populated with default settings.

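A hedged sketch (the keys shown mirror the default connector pipeline settings described in Elastic's docs; treat the names as assumptions)::

    resp = await client.connector.update_pipeline(
        connector_id="my-connector",
        pipeline={
            "name": "search-default-ingestion",  # hypothetical pipeline name
            "extract_binary_content": True,
            "reduce_whitespace": True,
            "run_ml_inference": False,
        },
    )
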
``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if pipeline is None and body is None: raise ValueError("Empty value passed for parameter 'pipeline'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_pipeline' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if pipeline is not None: __body["pipeline"] = pipeline __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_pipeline", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("scheduling",), ) @_stability_warning(Stability.BETA) async def update_scheduling( self, *, connector_id: str, scheduling: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector scheduling.

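A hedged sketch (the scheduling document uses cron-style intervals; the shape below follows Elastic's connector scheduling examples but is an assumption)::

    resp = await client.connector.update_scheduling(
        connector_id="my-connector",
        scheduling={
            "full": {"enabled": True, "interval": "0 0 0 * * ?"},  # daily full sync
        },
    )
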
``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if scheduling is None and body is None: raise ValueError("Empty value passed for parameter 'scheduling'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_scheduling' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if scheduling is not None: __body["scheduling"] = scheduling __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_scheduling", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service_type",), ) @_stability_warning(Stability.BETA) async def update_service_type( self, *, connector_id: str, service_type: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector service type.

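A minimal sketch (``google_drive`` is just an illustrative service type; use one supported by your connector service)::

    resp = await client.connector.update_service_type(
        connector_id="my-connector",
        service_type="google_drive",
    )
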
``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if service_type is None and body is None: raise ValueError("Empty value passed for parameter 'service_type'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_service_type' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if service_type is not None: __body["service_type"] = service_type __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_service_type", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("status",), ) @_stability_warning(Stability.EXPERIMENTAL) async def update_status( self, *, connector_id: str, status: t.Optional[ t.Union[ str, t.Literal[ "configured", "connected", "created", "error", "needs_configuration" ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector status.

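A minimal sketch; the allowed values are exactly the literals in the signature above (``configured``, ``connected``, ``created``, ``error``, ``needs_configuration``)::

    resp = await client.connector.update_status(
        connector_id="my-connector",
        status="needs_configuration",
    )
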
``_ :param connector_id: The unique identifier of the connector to be updated :param status: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if status is None and body is None: raise ValueError("Empty value passed for parameter 'status'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_status' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if status is not None: __body["status"] = status __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_status", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/dangling_indices.py000066400000000000000000000205171506101734100270550ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class DanglingIndicesClient(NamespacedClient): @_rewrite_parameters() async def delete_dangling_index( self, *, index_uuid: str, accept_data_loss: bool, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

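A minimal sketch (the UUID is a placeholder taken from the list dangling indices API; note that ``accept_data_loss=True`` is mandatory and the deletion is irreversible)::

    resp = await client.dangling_indices.delete_dangling_index(
        index_uuid="zmM4e0JtBkeUjiHD-MihPQ",  # hypothetical UUID
        accept_data_loss=True,
    )
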
``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. :param accept_data_loss: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index_uuid'") if accept_data_loss is None: raise ValueError("Empty value passed for parameter 'accept_data_loss'") __path_parts: t.Dict[str, str] = {"index_uuid": _quote(index_uuid)} __path = f'/_dangling/{__path_parts["index_uuid"]}' __query: t.Dict[str, t.Any] = {} if accept_data_loss is not None: __query["accept_data_loss"] = accept_data_loss if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="dangling_indices.delete_dangling_index", path_parts=__path_parts, ) @_rewrite_parameters() async def import_dangling_index( self, *, index_uuid: str, accept_data_loss: bool, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Import a dangling index.

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. :param accept_data_loss: This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index_uuid'") if accept_data_loss is None: raise ValueError("Empty value passed for parameter 'accept_data_loss'") __path_parts: t.Dict[str, str] = {"index_uuid": _quote(index_uuid)} __path = f'/_dangling/{__path_parts["index_uuid"]}' __query: t.Dict[str, t.Any] = {} if accept_data_loss is not None: __query["accept_data_loss"] = accept_data_loss if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="dangling_indices.import_dangling_index", path_parts=__path_parts, ) @_rewrite_parameters() async def list_dangling_indices( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the dangling indices.

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

Use this API to list dangling indices, which you can then import or delete.

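A hedged sketch of the list-then-import workflow (assumes ``client`` is an ``AsyncElasticsearch`` instance; the response field names follow the documented API but are worth verifying against your cluster)::

    resp = await client.dangling_indices.list_dangling_indices()
    for idx in resp["dangling_indices"]:
        # Importing cannot guarantee the data reflects the index's latest state.
        await client.dangling_indices.import_dangling_index(
            index_uuid=idx["index_uuid"],
            accept_data_loss=True,
        )
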
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="dangling_indices.list_dangling_indices", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/enrich.py000066400000000000000000000260361506101734100250460ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class EnrichClient(NamespacedClient): @_rewrite_parameters() async def delete_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an enrich policy. Deletes an existing enrich policy and its enrich index.

``_ :param name: Enrich policy to delete. :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="enrich.delete_policy", path_parts=__path_parts, ) @_rewrite_parameters() async def execute_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an enrich policy. Create the enrich index for an existing enrich policy.

``_ :param name: Enrich policy to execute. :param master_timeout: Period to wait for a connection to the master node. :param wait_for_completion: If `true`, the request blocks other enrich policy execution requests until complete. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}/_execute' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="enrich.execute_policy", path_parts=__path_parts, ) @_rewrite_parameters() async def get_policy( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get an enrich policy. Returns information about an enrich policy.

``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}' else: __path_parts = {} __path = "/_enrich/policy" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="enrich.get_policy", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("geo_match", "match", "range"), ) async def put_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, geo_match: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, match: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, range: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an enrich policy. Defines an enrich policy that you can then run to build its enrich index.

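A hedged sketch that creates a match policy and then runs it (the index, field, and policy names are hypothetical; the ``match`` body shape follows the enrich policy docs)::

    await client.enrich.put_policy(
        name="users-policy",
        match={
            "indices": "users",
            "match_field": "email",
            "enrich_fields": ["first_name", "last_name"],
        },
    )
    # The policy is only usable once its enrich index has been built.
    await client.enrich.execute_policy(name="users-policy")
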
``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` query. :param master_timeout: Period to wait for a connection to the master node. :param match: Matches enrich data to incoming documents based on a `term` query. :param range: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if geo_match is not None: __body["geo_match"] = geo_match if match is not None: __body["match"] = match if range is not None: __body["range"] = range __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="enrich.put_policy", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing.

``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_enrich/_stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="enrich.stats", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/eql.py000066400000000000000000000370741506101734100243630ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class EqlClient(NamespacedClient): @_rewrite_parameters() async def delete( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search.

``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_eql/search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="eql.delete", path_parts=__path_parts, ) @_rewrite_parameters() async def get( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search.

``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. :param wait_for_completion_timeout: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_eql/search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="eql.get", path_parts=__path_parts, ) @_rewrite_parameters() async def get_status( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results.

``_ :param id: Identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_eql/search/status/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="eql.get_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "query", "allow_partial_search_results", "allow_partial_sequence_results", "case_sensitive", "event_category_field", "fetch_size", "fields", "filter", "keep_alive", "keep_on_completion", "max_samples_per_key", "result_position", "runtime_mappings", "size", "tiebreaker_field", "timestamp_field", "wait_for_completion_timeout", ), ) async def search( self, *, index: t.Union[str, t.Sequence[str]], query: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fetch_size: t.Optional[int] = None, fields: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, filter: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, max_samples_per_key: t.Optional[int] = None, pretty: t.Optional[bool] = None, result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, size: t.Optional[int] = None, tiebreaker_field: t.Optional[str] = None, timestamp_field: t.Optional[str] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event.

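A minimal sketch (the index name and query are illustrative; the ``hits.events`` response shape follows the EQL docs for non-sequence queries)::

    resp = await client.eql.search(
        index="my-data-stream",
        query='process where process.name == "regsvr32.exe"',
        size=5,
    )
    for event in resp["hits"]["events"]:
        print(event["_source"])
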
``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param allow_partial_search_results: Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined using allow_partial_sequence_results :param allow_partial_sequence_results: This flag applies only to sequences and has effect only if allow_partial_search_results=true. If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results. :param case_sensitive: :param ccs_minimize_roundtrips: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution :param event_category_field: Field containing the event classification, such as process, file, or network. :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param fetch_size: Maximum number of events to search at a time for sequence queries. :param fields: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. :param filter: Query, written in Query DSL, used to filter the events on which the EQL query runs. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param keep_alive: :param keep_on_completion: :param max_samples_per_key: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. :param result_position: :param runtime_mappings: :param size: For basic queries, the maximum number of matching events to return. Defaults to 10 :param tiebreaker_field: Field used to sort hits with the same timestamp in ascending order :param timestamp_field: Field containing event timestamp. 
Default "@timestamp" :param wait_for_completion_timeout: """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_eql/search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = query if allow_partial_search_results is not None: __body["allow_partial_search_results"] = allow_partial_search_results if allow_partial_sequence_results is not None: __body["allow_partial_sequence_results"] = ( allow_partial_sequence_results ) if case_sensitive is not None: __body["case_sensitive"] = case_sensitive if event_category_field is not None: __body["event_category_field"] = event_category_field if fetch_size is not None: __body["fetch_size"] = fetch_size if fields is not None: __body["fields"] = fields if filter is not None: __body["filter"] = filter if keep_alive is not None: __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion if max_samples_per_key is not None: __body["max_samples_per_key"] = max_samples_per_key if result_position is not None: __body["result_position"] = result_position if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if size is not None: __body["size"] = size if tiebreaker_field is not None: __body["tiebreaker_field"] = tiebreaker_field if timestamp_field is not None: __body["timestamp_field"] = timestamp_field if wait_for_completion_timeout is not None: __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="eql.search", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/esql.py000066400000000000000000000666471506101734100245560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) if t.TYPE_CHECKING: from elasticsearch.esql import ESQLBase class EsqlClient(NamespacedClient): @_rewrite_parameters( body_fields=( "query", "columnar", "filter", "include_ccs_metadata", "keep_alive", "keep_on_completion", "locale", "params", "profile", "tables", "wait_for_completion_timeout", ), ignore_deprecated_options={"params"}, ) async def async_query( self, *, query: t.Optional[t.Union[str, "ESQLBase"]] = None, allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal[ "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" ], ] ] = None, human: t.Optional[bool] = None, include_ccs_metadata: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[t.Sequence[t.Union[None, bool, float, int, str]]] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, tables: t.Optional[ t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.

The API accepts the same parameters and request body as the synchronous query API, along with additional async-related properties.

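A minimal sketch (the index and ES|QL text are illustrative)::

    resp = await client.esql.async_query(
        query="FROM my-index | STATS events = COUNT(*) BY host.name | LIMIT 10",
        wait_for_completion_timeout="2s",  # hand back a query ID if not done in 2s
        keep_on_completion=True,
    )
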
``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures. To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. :param delimiter: The character to use between values within a CSV row. It is valid only for the CSV format. :param drop_null_columns: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, e.g. json, yaml. `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. For async requests, nothing will be returned if the async query doesn't finish within the timeout. The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively. :param include_ccs_metadata: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. :param keep_alive: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. :param keep_on_completion: Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. :param locale: :param params: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. :param profile: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. :param tables: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. :param wait_for_completion_timeout: The period to wait for the request to finish. By default, the request waits for 1 second for the query results. 
If the query completes during this period, results are returned. Otherwise, a query ID is returned that can later be used to retrieve the results. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {} __path = "/_query/async" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_partial_results is not None: __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = str(query) if columnar is not None: __body["columnar"] = columnar if filter is not None: __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata if keep_alive is not None: __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion if locale is not None: __body["locale"] = locale if params is not None: __body["params"] = params if profile is not None: __body["profile"] = profile if tables is not None: __body["tables"] = tables if wait_for_completion_timeout is not None: __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="esql.async_query", path_parts=__path_parts, ) @_rewrite_parameters() async def async_query_delete( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted.

If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:

  • The authenticated user that submitted the original query request
  • Users with the cancel_task cluster privilege
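A minimal usage sketch (illustrative only, not part of the generated client; assumes an ``AsyncElasticsearch`` instance named ``client`` used inside an ``async`` function, with ``query_id`` taken from a prior ``async_query`` response)::

    # Cancel the async query if still running, then delete its stored results.
    resp = await client.esql.async_query_delete(id=query_id)
    print(resp)  # typically {'acknowledged': True} on success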
``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/async/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="esql.async_query_delete", path_parts=__path_parts, ) @_rewrite_parameters() async def async_query_get( self, *, id: str, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal[ "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" ], ] ] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.
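A minimal polling sketch (illustrative only; assumes an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function, and a ``query_id`` returned by ``async_query``)::

    resp = await client.esql.async_query_get(
        id=query_id,
        wait_for_completion_timeout="2s",  # block briefly waiting for results
    )
    if not resp["is_running"]:
        # schema in resp["columns"], one list per row in resp["values"]
        print(resp["columns"], resp["values"])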

``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. :param drop_null_columns: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. :param format: A short version of the Accept header, for example `json` or `yaml`. :param keep_alive: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. :param wait_for_completion_timeout: The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/async/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="esql.async_query_get", path_parts=__path_parts, ) @_rewrite_parameters() async def async_query_stop( self, *, id: str, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop async ES|QL query.

This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.
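A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance and ``query_id`` comes from a prior ``async_query`` call)::

    # Interrupt the query and take whatever results are available so far.
    partial = await client.esql.async_query_stop(id=query_id)
    print(partial["columns"], partial["values"])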

``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. :param drop_null_columns: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/async/{__path_parts["id"]}/stop' __query: t.Dict[str, t.Any] = {} if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="esql.async_query_stop", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_query( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get information about a specific running ES|QL query. Returns an object with extended information about a running ES|QL query.
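A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance and ``query_id`` is assumed to come from ``list_queries``)::

    info = await client.esql.get_query(id=query_id)
    print(info)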

``_ :param id: The query ID """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/queries/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="esql.get_query", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def list_queries( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get information about running ES|QL queries. Returns an object containing the IDs and other information about the currently running ES|QL queries.
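A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance, and the response is expected to contain a ``queries`` object keyed by query ID)::

    resp = await client.esql.list_queries()
    for query_id, info in resp["queries"].items():
        print(query_id, info)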

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_query/queries" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="esql.list_queries", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "query", "columnar", "filter", "include_ccs_metadata", "locale", "params", "profile", "tables", ), ignore_deprecated_options={"params"}, ) async def query( self, *, query: t.Optional[t.Union[str, "ESQLBase"]] = None, allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal[ "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" ], ] ] = None, human: t.Optional[bool] = None, include_ccs_metadata: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[t.Sequence[t.Union[None, bool, float, int, str]]] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, tables: t.Optional[ t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.

``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures. To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. :param delimiter: The character to use between values within a CSV row. Only valid for the CSV format. :param drop_null_columns: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, e.g. json, yaml. `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. :param include_ccs_metadata: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. :param locale: :param params: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. :param profile: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. :param tables: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. 
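A minimal usage sketch (illustrative only, not generated output; assumes an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function, and a hypothetical ``sample-logs`` index)::

    # Question-mark placeholders keep user input out of the query string.
    resp = await client.esql.query(
        query="FROM sample-logs | WHERE status == ? | LIMIT 10",
        params=[404],
    )
    for row in resp["values"]:  # column schema is in resp["columns"]
        print(row)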
""" if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {} __path = "/_query" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_partial_results is not None: __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = str(query) if columnar is not None: __body["columnar"] = columnar if filter is not None: __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata if locale is not None: __body["locale"] = locale if params is not None: __body["params"] = params if profile is not None: __body["profile"] = profile if tables is not None: __body["tables"] = tables __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="esql.query", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/features.py000066400000000000000000000140641506101734100254120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import Stability, _rewrite_parameters, _stability_warning class FeaturesClient(NamespacedClient): @_rewrite_parameters() async def get_features( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the features. Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.

A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together.

The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.
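A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance)::

    resp = await client.features.get_features()
    names = [feature["name"] for feature in resp["features"]]
    # a subset of these names can be passed as `feature_states`
    # when creating a snapshot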

``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="features.get_features", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def reset_features( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.

WARNING: Intended for development and testing use only. Do not reset features on a production cluster.

Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices.

The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature.

Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins.

To list the features that will be affected, use the get features API.

IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.
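A minimal usage sketch (illustrative only, for disposable development or test clusters; ``client`` is an assumed ``AsyncElasticsearch`` instance)::

    # Destructive: clears feature state system indices. Never run in production.
    resp = await client.features.reset_features()
    print(resp["features"])  # per-feature reset status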

``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features/_reset" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="features.reset_features", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/fleet.py000066400000000000000000000745651506101734100247070ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class FleetClient(NamespacedClient): @_rewrite_parameters() async def global_checkpoints( self, *, index: str, checkpoints: t.Optional[t.Sequence[int]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_advance: t.Optional[bool] = None, wait_for_index: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get global checkpoints.

Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.
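A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance and ``logs-fleet`` a hypothetical index)::

    resp = await client.fleet.global_checkpoints(
        index="logs-fleet",
        wait_for_advance=True,
        checkpoints=[42],  # wait until the checkpoint advances past 42
        timeout="30s",
    )
    print(resp["global_checkpoints"])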

``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. :param timeout: Period to wait for a global checkpoints to advance past `checkpoints`. :param wait_for_advance: A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. :param wait_for_index: A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_fleet/global_checkpoints' __query: t.Dict[str, t.Any] = {} if checkpoints is not None: __query["checkpoints"] = checkpoints if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_advance is not None: __query["wait_for_advance"] = wait_for_advance if wait_for_index is not None: __query["wait_for_index"] = wait_for_index __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="fleet.global_checkpoints", path_parts=__path_parts, ) @_rewrite_parameters( body_name="searches", ) @_stability_warning(Stability.EXPERIMENTAL) async def msearch( self, *, searches: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, typed_keys: t.Optional[bool] = None, wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the wait_for_checkpoints parameter.
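A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance and the index name is hypothetical). As with ``msearch``, the body alternates header and request objects::

    resp = await client.fleet.msearch(
        index="logs-fleet",
        searches=[
            {},  # header: defaults to the index in the request path
            {"query": {"match_all": {}}, "size": 1},
        ],
    )
    for item in resp["responses"]:
        print(item.get("hits", {}).get("total"))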

``_ :param searches: :param index: A single target to search. If the target is an index alias, it must resolve to a single index. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param allow_partial_search_results: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. :param rest_total_hits_as_int: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. :param search_type: Indicates whether global term and document frequencies should be used when scoring returned documents. :param typed_keys: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. :param wait_for_checkpoints: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. """ if searches is None and body is None: raise ValueError( "Empty value passed for parameters 'searches' and 'body', one of them should be set." 
) elif searches is not None and body is not None: raise ValueError("Cannot set both 'searches' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_fleet/_fleet_msearch' else: __path_parts = {} __path = "/_fleet/_fleet_msearch" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_checkpoints is not None: __query["wait_for_checkpoints"] = wait_for_checkpoints __body = searches if searches is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="fleet.msearch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "collapse", "docvalue_fields", "explain", "ext", "fields", "from_", "highlight", "indices_boost", "min_score", "pit", "post_filter", "profile", "query", "rescore", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", "from": "from_", }, ) @_stability_warning(Stability.EXPERIMENTAL) async def search( self, *, index: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, batched_reduce_size: t.Optional[int] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, collapse: t.Optional[t.Mapping[str, t.Any]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], 
t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, ext: t.Optional[t.Mapping[str, t.Any]] = None, fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, highlight: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, request_cache: t.Optional[bool] = None, rescore: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, seq_no_primary_term: t.Optional[bool] = None, size: t.Optional[int] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stats: t.Optional[t.Sequence[str]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, suggest: t.Optional[t.Mapping[str, t.Any]] = None, suggest_field: t.Optional[str] = None, suggest_mode: t.Optional[ t.Union[str, t.Literal["always", "missing", "popular"]] ] = None, suggest_size: t.Optional[int] = None, suggest_text: t.Optional[str] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[str] = None, track_scores: t.Optional[bool] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, typed_keys: t.Optional[bool] = None, version: t.Optional[bool] = None, wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.
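A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance and ``logs-fleet`` a hypothetical index)::

    resp = await client.fleet.search(
        index="logs-fleet",
        query={"match_all": {}},
        wait_for_checkpoints=[17],  # run only once sequence number 17 is visible
        allow_partial_search_results=False,
    )
    print(resp["hits"]["total"])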

``_ :param index: A single target to search. If the target is an index alias, it must resolve to a single index. :param aggregations: :param aggs: :param allow_no_indices: :param allow_partial_search_results: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. :param analyze_wildcard: :param analyzer: :param batched_reduce_size: :param ccs_minimize_roundtrips: :param collapse: :param default_operator: :param df: :param docvalue_fields: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. :param expand_wildcards: :param explain: If true, returns detailed information about score computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. :param fields: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param highlight: :param ignore_throttled: :param ignore_unavailable: :param indices_boost: Boosts the _score of documents from specified indices. :param lenient: :param max_concurrent_shard_requests: :param min_score: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an in the request path. :param post_filter: :param pre_filter_shard_size: :param preference: :param profile: :param q: :param query: Defines the search definition using the Query DSL. :param request_cache: :param rescore: :param rest_total_hits_as_int: :param routing: :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. :param scroll: :param search_after: :param search_type: :param seq_no_primary_term: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. :param size: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param slice: :param sort: :param source: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. :param source_excludes: :param source_includes: :param stats: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. :param stored_fields: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. :param suggest: :param suggest_field: Specifies which field to use for suggestions. 
:param suggest_mode: :param suggest_size: :param suggest_text: The source text for which the suggestions should be returned. :param terminate_after: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. :param timeout: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param track_scores: If true, calculate and return document scores, even if the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. :param typed_keys: :param version: If true, returns document version as part of a hit. :param wait_for_checkpoints: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_fleet/_fleet_search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if batched_reduce_size is not None: __query["batched_reduce_size"] = batched_reduce_size if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if request_cache is not None: __query["request_cache"] = request_cache if rest_total_hits_as_int is not None: 
__query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if suggest_field is not None: __query["suggest_field"] = suggest_field if suggest_mode is not None: __query["suggest_mode"] = suggest_mode if suggest_size is not None: __query["suggest_size"] = suggest_size if suggest_text is not None: __query["suggest_text"] = suggest_text if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_checkpoints is not None: __query["wait_for_checkpoints"] = wait_for_checkpoints if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if collapse is not None: __body["collapse"] = collapse if docvalue_fields is not None: __body["docvalue_fields"] = docvalue_fields if explain is not None: __body["explain"] = explain if ext is not None: __body["ext"] = ext if fields is not None: __body["fields"] = fields if from_ is not None: __body["from"] = from_ if highlight is not None: __body["highlight"] = highlight if indices_boost is not None: __body["indices_boost"] = indices_boost if min_score is not None: __body["min_score"] = min_score if pit is not None: __body["pit"] = pit if post_filter is not None: __body["post_filter"] = post_filter if profile is not None: __body["profile"] = profile if query is not None: __body["query"] = query if rescore is not None: __body["rescore"] = rescore if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if search_after is not None: __body["search_after"] = search_after if seq_no_primary_term is not None: __body["seq_no_primary_term"] = seq_no_primary_term if size is not None: __body["size"] = size if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort if source is not None: __body["_source"] = source if stats is not None: __body["stats"] = stats if stored_fields is not None: __body["stored_fields"] = stored_fields if suggest is not None: __body["suggest"] = suggest if terminate_after is not None: __body["terminate_after"] = terminate_after if timeout is not None: __body["timeout"] = timeout if track_scores is not None: __body["track_scores"] = track_scores if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="fleet.search", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/graph.py000066400000000000000000000120201506101734100246630ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class GraphClient(NamespacedClient): @_rewrite_parameters( body_fields=("connections", "controls", "query", "vertices"), ) async def explore( self, *, index: t.Union[str, t.Sequence[str]], connections: t.Optional[t.Mapping[str, t.Any]] = None, controls: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, vertices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the _explore API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one or more vertices of interest. You can exclude vertices that have already been returned.
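A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance, and the index and field names are hypothetical)::

    resp = await client.graph.explore(
        index="clicklogs",
        query={"match": {"query.raw": "midi"}},     # seed documents
        vertices=[{"field": "product"}],            # terms to use as vertices
        controls={"use_significance": False, "sample_size": 2000},
    )
    print(resp["vertices"], resp["connections"])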

``_ :param index: Name of the index. :param connections: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. :param controls: Direct the Graph API how to build the graph. :param query: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. :param routing: Custom value used to route operations to a specific shard. :param timeout: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param vertices: Specifies one or more fields that contain the terms you want to include in the graph as vertices. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_graph/explore' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if routing is not None: __query["routing"] = routing if timeout is not None: __query["timeout"] = timeout if not __body: if connections is not None: __body["connections"] = connections if controls is not None: __body["controls"] = controls if query is not None: __body["query"] = query if vertices is not None: __body["vertices"] = vertices if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="graph.explore", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/ilm.py000066400000000000000000000677061506101734100243660ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class IlmClient(NamespacedClient): @_rewrite_parameters() async def delete_lifecycle( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"policy": _quote(name)} __path = f'/_ilm/policy/{__path_parts["policy"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ilm.delete_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def explain_lifecycle( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, only_errors: t.Optional[bool] = None, only_managed: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices.

The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and includes information about any failures.
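A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance and the index pattern hypothetical)::

    resp = await client.ilm.explain_lifecycle(index="my-index-*", only_errors=True)
    for name, info in resp["indices"].items():
        print(name, info.get("step"), info.get("failed_step"))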

``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param only_errors: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist. :param only_managed: Filters the returned indices to only indices that are managed by ILM. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ilm/explain' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if only_errors is not None: __query["only_errors"] = only_errors if only_managed is not None: __query["only_managed"] = only_managed if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ilm.explain_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def get_lifecycle( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get lifecycle policies.

``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"policy": _quote(name)} __path = f'/_ilm/policy/{__path_parts["policy"]}' else: __path_parts = {} __path = "/_ilm/policy" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ilm.get_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def get_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the ILM status.

Get the current index lifecycle management status.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ilm.get_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("legacy_template_to_delete", "node_attribute"), ) async def migrate_to_data_tiers( self, *, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, legacy_template_to_delete: t.Optional[str] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, node_attribute: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers.

Migrating away from custom node attribute routing can be performed manually. This API provides an automated way of performing three out of the four manual steps listed in the migration guide:

  1. Stop setting the custom hot attribute on new indices.
  2. Remove custom allocation settings from existing ILM policies.
  3. Replace custom allocation settings from existing indices with the corresponding tier preference.

ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is STOPPED.
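A minimal usage sketch (illustrative only; ``client`` is an assumed ``AsyncElasticsearch`` instance)::

    # Stop ILM first, then preview the migration without applying it.
    await client.ilm.stop()
    resp = await client.ilm.migrate_to_data_tiers(dry_run=True)
    print(resp["migrated_ilm_policies"], resp["migrated_indices"])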

``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. :param legacy_template_to_delete: :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param node_attribute: """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/migrate_to_data_tiers" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if legacy_template_to_delete is not None: __body["legacy_template_to_delete"] = legacy_template_to_delete if node_attribute is not None: __body["node_attribute"] = node_attribute if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ilm.migrate_to_data_tiers", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("current_step", "next_step"), ) async def move_to_step( self, *, index: str, current_step: t.Optional[t.Mapping[str, t.Any]] = None, next_step: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step.

WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and it should be considered an expert-level API.

You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step.

When specifying the target (next_step) to which the index will be moved, the name field, or both the action and name fields, are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.
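
A hedged sketch, assuming ``client`` is an ``AsyncElasticsearch`` instance and using placeholder index and step values::

    resp = await client.ilm.move_to_step(
        index="my-index",  # placeholder index name
        # The step the index is expected to be in right now.
        current_step={"phase": "hot", "action": "complete", "name": "complete"},
        # Only the phase is given, so the index moves to the first step
        # of the first action in the warm phase.
        next_step={"phase": "warm"},
    )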

``_ :param index: The name of the index whose lifecycle step is to change :param current_step: The step that the index is expected to be in. :param next_step: The step that you want to run. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if current_step is None and body is None: raise ValueError("Empty value passed for parameter 'current_step'") if next_step is None and body is None: raise ValueError("Empty value passed for parameter 'next_step'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_ilm/move/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if current_step is not None: __body["current_step"] = current_step if next_step is not None: __body["next_step"] = next_step if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ilm.move_to_step", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("policy",), ) async def put_lifecycle( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, policy: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented.

NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.
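
A minimal sketch, assuming ``client`` is an ``AsyncElasticsearch`` instance; the policy name and phase definitions are placeholders::

    resp = await client.ilm.put_lifecycle(
        name="my-policy",  # placeholder policy name
        policy={
            "phases": {
                "hot": {"actions": {"rollover": {"max_age": "30d"}}},
                "delete": {"min_age": "90d", "actions": {"delete": {}}},
            }
        },
    )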

``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param policy: :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"policy": _quote(name)} __path = f'/_ilm/policy/{__path_parts["policy"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if policy is not None: __body["policy"] = policy if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ilm.put_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def remove_policy( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices.
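
For example, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    # "my-index" is a placeholder index name.
    resp = await client.ilm.remove_policy(index="my-index")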

``_ :param index: The name of the index to remove policy on """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ilm/remove' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.remove_policy", path_parts=__path_parts, ) @_rewrite_parameters() async def retry( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.
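
For example, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    # Retry the failed lifecycle step for a placeholder index.
    resp = await client.ilm.retry(index="my-index")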

``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retried """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ilm/retry' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.retry", path_parts=__path_parts, ) @_rewrite_parameters() async def start( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.

``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/start" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.start", path_parts=__path_parts, ) @_rewrite_parameters() async def stop( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.

The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.
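
A sketch of stopping ILM and polling until it is fully stopped, assuming ``client`` is an ``AsyncElasticsearch`` instance and that the status response exposes an ``operation_mode`` field as described above::

    import asyncio

    await client.ilm.stop()
    # Poll the get ILM status API until in-progress operations finish.
    while (await client.ilm.get_status())["operation_mode"] != "STOPPED":
        await asyncio.sleep(1)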

``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/stop" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.stop", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/indices.py000066400000000000000000011106261506101734100252140ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import HeadApiResponse, ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class IndicesClient(NamespacedClient): @_rewrite_parameters() async def add_block( self, *, index: str, block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Add an index block.

Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types.
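
For example, blocking writes on a placeholder index, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    resp = await client.indices.add_block(index="my-index", block="write")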

``_ :param index: A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. :param block: The block type to add to the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if block in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'block'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "block": _quote(block), } __path = f'/{__path_parts["index"]}/_block/{__path_parts["block"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="indices.add_block", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analyzer", "attributes", "char_filter", "explain", "field", "filter", "normalizer", "text", "tokenizer", ), ) async def analyze( self, *, index: t.Optional[str] = None, analyzer: t.Optional[str] = None, attributes: t.Optional[t.Sequence[str]] = None, char_filter: t.Optional[t.Sequence[t.Union[str, t.Mapping[str, t.Any]]]] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, field: t.Optional[str] = None, filter: t.Optional[t.Sequence[t.Union[str, t.Mapping[str, t.Any]]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, normalizer: t.Optional[str] = None, pretty: t.Optional[bool] = None, text: t.Optional[t.Union[str, t.Sequence[str]]] = None, tokenizer: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens.

Generating an excessive amount of tokens may cause a node to run out of memory. The index.analyze.max_token_count setting enables you to limit the number of tokens that can be produced. If more tokens than this limit are generated, an error occurs. The _analyze endpoint without a specified index will always use 10000 as its limit.
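
A minimal sketch, assuming ``client`` is an ``AsyncElasticsearch`` instance; the analyzer and text are illustrative::

    resp = await client.indices.analyze(
        analyzer="standard",
        text="The quick brown fox",
    )
    tokens = [tok["token"] for tok in resp["tokens"]]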

``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. :param analyzer: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. :param attributes: Array of token attributes used to filter the output of the `explain` parameter. :param char_filter: Array of character filters used to preprocess characters before the tokenizer. :param explain: If `true`, the response includes token attributes and additional details. :param field: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. :param filter: Array of token filters used to apply after the tokenizer. :param normalizer: Normalizer to use to convert text into a single token. :param text: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. :param tokenizer: Tokenizer to use to convert text into tokens. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_analyze' else: __path_parts = {} __path = "/_analyze" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analyzer is not None: __body["analyzer"] = analyzer if attributes is not None: __body["attributes"] = attributes if char_filter is not None: __body["char_filter"] = char_filter if explain is not None: __body["explain"] = explain if field is not None: __body["field"] = field if filter is not None: __body["filter"] = filter if normalizer is not None: __body["normalizer"] = normalizer if text is not None: __body["text"] = text if tokenizer is not None: __body["tokenizer"] = tokenizer if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.analyze", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def cancel_migrate_reindex( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel a migration reindex operation.

Cancel a migration reindex attempt for a data stream or index.

``_ :param index: The index or data stream name """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_migration/reindex/{__path_parts["index"]}/_cancel' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.cancel_migrate_reindex", path_parts=__path_parts, ) @_rewrite_parameters() async def clear_cache( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fielddata: t.Optional[bool] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[bool] = None, request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices.

By default, the clear cache API clears all caches. To clear only specific caches, use the fielddata, query, or request parameters. To clear the cache only of specific fields, use the fields parameter.
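
For example, clearing only the fielddata cache of two placeholder fields, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    resp = await client.indices.clear_cache(
        index="my-index",  # placeholder index name
        fielddata=True,
        fields="field1,field2",
    )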

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. :param fields: Comma-separated list of field names used to limit the `fielddata` parameter. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param query: If `true`, clears the query cache. :param request: If `true`, clears the request cache. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_cache/clear' else: __path_parts = {} __path = "/_cache/clear" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if fielddata is not None: __query["fielddata"] = fielddata if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if query is not None: __query["query"] = query if request is not None: __query["request"] = request __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.clear_cache", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "settings"), ) async def clone( self, *, index: str, target: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index.

IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index.

The clone API copies most index settings from the source index to the resulting index, with the exception of index.number_of_replicas and index.auto_expand_replicas. To set the number of replicas in the resulting index, configure these settings in the clone request.

Cloning works as follows:

  • First, it creates a new target index with the same definition as the source index.
  • Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
  • Finally, it recovers the target index as though it were a closed index which had just been re-opened.

IMPORTANT: Indices can only be cloned if they meet the following requirements:

  • The index must be marked as read-only and have a cluster health status of green.
  • The target index must not exist.
  • The source index must have the same number of primary shards as the target index.
  • The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.

The current write index on a data stream cannot be cloned. In order to clone the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be cloned.

NOTE: Mappings cannot be specified in the _clone request. The mappings of the source index will be used for the target index.

Monitor the cloning process

The cloning process can be monitored with the cat recovery API, or the cluster health API can be used to wait until all primary shards have been allocated by setting the wait_for_status parameter to yellow.

The _clone API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.

Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.

Wait for active shards

Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.
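
A sketch of the full flow with placeholder index names, assuming ``client`` is an ``AsyncElasticsearch`` instance and using the put settings API to apply the write block required before cloning::

    # Mark the source index as read-only for writes.
    await client.indices.put_settings(
        index="my-source-index",
        settings={"index.blocks.write": True},
    )
    resp = await client.indices.clone(
        index="my-source-index",
        target="my-target-index",
        settings={"index.number_of_replicas": 1},
    )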

``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. :param aliases: Aliases for the resulting index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the target index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target": _quote(target), } __path = f'/{__path_parts["index"]}/_clone/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.clone", path_parts=__path_parts, ) @_rewrite_parameters() async def close( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.

When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the ignore_unavailable=true parameter.

By default, you must explicitly name the indices you are opening or closing. To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. This setting can also be changed with the cluster update settings API.

Closed indices consume a significant amount of disk space, which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.
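
For example, closing a placeholder index, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    resp = await client.indices.close(index="my-index")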

``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_close' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.close", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "mappings", "settings"), ) async def create( self, *, index: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following:

  • Settings for the index.
  • Mappings for fields in the index.
  • Index aliases.

Wait for active shards

By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, acknowledged indicates whether the index was successfully created in the cluster, while shards_acknowledged indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either acknowledged or shards_acknowledged to be false, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If acknowledged is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If shards_acknowledged is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, acknowledged is true).

You can change the default of only waiting for the primary shards to start through the index setting index.write.wait_for_active_shards. Note that changing this setting will also affect the wait_for_active_shards value on all subsequent write operations.
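
A minimal sketch with placeholder names and values, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    resp = await client.indices.create(
        index="my-index",  # placeholder index name
        settings={"index": {"number_of_shards": 3, "number_of_replicas": 2}},
        mappings={"properties": {"field1": {"type": "text"}}},
        # Wait for all copies (primaries and replicas) to start, rather
        # than the default of primaries only.
        wait_for_active_shards="all",
    )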

``_ :param index: Name of the index you wish to create. Index names must meet the following criteria: * Lowercase only * Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot be `.` or `..` * Cannot be longer than 255 bytes (note that it is bytes, so multi-byte characters will reach the limit faster) * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins :param aliases: Aliases for the index. :param mappings: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if mappings is not None: __body["mappings"] = mappings if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.create", path_parts=__path_parts, ) @_rewrite_parameters() async def create_data_stream( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a data stream.

You must have a matching index template with data stream enabled.
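
For example, with a placeholder stream name that a matching index template is assumed to cover, and ``client`` assumed to be an ``AsyncElasticsearch`` instance::

    resp = await client.indices.create_data_stream(name="logs-myapp-default")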

``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="indices.create_data_stream", path_parts=__path_parts, ) @_rewrite_parameters( body_name="create_from", ) @_stability_warning(Stability.EXPERIMENTAL) async def create_from( self, *, source: str, dest: str, create_from: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an index from a source index.

Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.

``_ :param source: The source index or data stream name :param dest: The destination index or data stream name :param create_from: """ if source in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'source'") if dest in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'dest'") if create_from is None and body is None: raise ValueError( "Empty value passed for parameters 'create_from' and 'body', one of them should be set." ) elif create_from is not None and body is not None: raise ValueError("Cannot set both 'create_from' and 'body'") __path_parts: t.Dict[str, str] = { "source": _quote(source), "dest": _quote(dest), } __path = f'/_create_from/{__path_parts["source"]}/{__path_parts["dest"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = create_from if create_from is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.create_from", path_parts=__path_parts, ) @_rewrite_parameters() async def data_streams_stats( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream stats.

Get statistics for one or more data streams.

``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_stats' else: __path_parts = {} __path = "/_data_stream/_stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.data_streams_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def delete( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards.

You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index.
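
For example, deleting a placeholder index, assuming ``client`` is an ``AsyncElasticsearch`` instance::

    resp = await client.indices.delete(index="my-old-index")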

``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_alias( self, *, index: t.Union[str, t.Sequence[str]], name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an alias. Removes a data stream or index from an alias.

``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). :param name: Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_alias", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, so that it is no longer managed by the data stream lifecycle.

``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams :param expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open) :param master_timeout: Specify timeout for connection to master :param timeout: Explicit timestamp for the document """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_data_stream( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete data streams. Deletes one or more data streams and their backing indices.

``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_data_stream", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_data_stream_options( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete data stream options. Removes the data stream options from a data stream.

``_ :param name: A comma-separated list of data streams of which the data stream options will be deleted; use `*` to get all data streams :param expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open) :param master_timeout: Specify timeout for connection to master :param timeout: Explicit timestamp for the document """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_options' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_data_stream_options", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_index_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an index template. The provided name may contain multiple template names separated by a comma. If multiple template names are specified, there is no wildcard support and the provided names must exactly match existing templates.
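
For example (with `client` a hypothetical `AsyncElasticsearch` instance and `my-template` a hypothetical composable template name):

    resp = await client.indices.delete_index_template(name="my-template")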

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_index_template", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_template( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
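
For example (the legacy template name is hypothetical):

    resp = await client.indices.delete_template(name="my-legacy-template")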

``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_template", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def disk_usage( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flush: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result for a small index can be inaccurate, as some parts of an index might not be analyzed by the API.

NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index store_size value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the _id field is likely underestimated while the _source field is overestimated.
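
For example, a minimal sketch (the index name is hypothetical); `run_expensive_tasks=True` is required because the analysis is resource-intensive:

    resp = await client.indices.disk_usage(index="my-index", run_expensive_tasks=True)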

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flush: If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param run_expensive_tasks: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_disk_usage' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flush is not None: __query["flush"] = flush if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if run_expensive_tasks is not None: __query["run_expensive_tasks"] = run_expensive_tasks __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.disk_usage", path_parts=__path_parts, ) @_rewrite_parameters( body_name="config", ) @_stability_warning(Stability.EXPERIMENTAL) async def downsample( self, *, index: str, target_index: str, config: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Downsample an index. Aggregate a time series (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index.

NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read only (index.blocks.write: true).
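
For example, a minimal sketch (the index names are hypothetical and `fixed_interval` is just a sample downsampling interval):

    resp = await client.indices.downsample(
        index="my-tsds-index-000001",
        target_index="my-tsds-index-000001-downsampled",
        config={"fixed_interval": "1h"},
    )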

``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. :param config: """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target_index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target_index'") if config is None and body is None: raise ValueError( "Empty value passed for parameters 'config' and 'body', one of them should be set." ) elif config is not None and body is not None: raise ValueError("Cannot set both 'config' and 'body'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target_index": _quote(target_index), } __path = f'/{__path_parts["index"]}/_downsample/{__path_parts["target_index"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = config if config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.downsample", path_parts=__path_parts, ) @_rewrite_parameters() async def exists( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check indices. Check if one or more indices, index aliases, or data streams exist.
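
For example, a sketch relying on the `HEAD` response being truthy when the target exists (the index name is hypothetical):

    if await client.indices.exists(index="my-index"):
        print("my-index exists")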

``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. :param local: If `true`, the request retrieves information from the local node only. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists", path_parts=__path_parts, ) @_rewrite_parameters() async def exists_alias( self, *, name: t.Union[str, t.Sequence[str]], index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check aliases.

Check if one or more data stream or index aliases exist.
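
For example (alias and index names hypothetical):

    if await client.indices.exists_alias(name="my-alias", index="my-index"):
        print("alias exists on my-index")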

``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' elif name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_alias/{__path_parts["name"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists_alias", path_parts=__path_parts, ) @_rewrite_parameters() async def exists_index_template( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check index templates.

Check whether index templates exist.
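
For example (the template name is hypothetical):

    template_exists = await client.indices.exists_index_template(name="my-template")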

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. :param flat_settings: If true, returns settings in flat format. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists_index_template", path_parts=__path_parts, ) @_rewrite_parameters() async def exists_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

``_ :param name: A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. :param flat_settings: Indicates whether to use a flat format for the response. :param local: Indicates whether to get information from the local node only. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists_template", path_parts=__path_parts, ) @_rewrite_parameters() async def explain_data_lifecycle( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
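
For example, a minimal sketch (the backing index name is hypothetical); `include_defaults=True` also reports the default values the system uses for the index's lifecycle:

    resp = await client.indices.explain_data_lifecycle(
        index=".ds-my-data-stream-2099.03.07-000001",
        include_defaults=True,
    )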

``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values the system uses for the index's lifecycle :param master_timeout: Specify timeout for connection to master """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_lifecycle/explain' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.explain_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def field_usage_stats( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.

The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.
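
For example, a sketch restricted to a single field (index and field names hypothetical):

    resp = await client.indices.field_usage_stats(index="my-index", fields="session_id")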

``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fields: Comma-separated list or wildcard expressions of fields to include in the statistics. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_field_usage_stats' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.field_usage_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def flush( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.

After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.

It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
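
For example, a minimal sketch (the index name is hypothetical); `wait_if_ongoing=True` blocks until any concurrent flush completes:

    resp = await client.indices.flush(index="my-index", wait_if_ongoing=True)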

``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param force: If `true`, the request forces a flush even if there are no changes to commit to the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param wait_if_ongoing: If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_flush' else: __path_parts = {} __path = "/_flush" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if wait_if_ongoing is not None: __query["wait_if_ongoing"] = wait_if_ongoing __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.flush", path_parts=__path_parts, ) @_rewrite_parameters() async def forcemerge( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flush: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, max_num_segments: t.Optional[int] = None, only_expunge_deletes: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices.

Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.

WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.

Blocks during a force merge

Calls to this API block until the merge is complete (unless the request contains wait_for_completion=false). If the client connection is lost before completion, the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete.

Running force merge asynchronously

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task that you can use to get the status of the task. However, you cannot cancel this task because the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at _tasks/<task_id>. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.

Force merging multiple indices

You can force merge multiple indices with a single request by targeting:

  • One or more data streams that contain multiple backing indices
  • Multiple indices
  • One or more aliases
  • All data streams and indices in a cluster

Each targeted shard is force-merged separately using the force_merge threadpool. By default, each node only has a single force_merge thread, which means that the shards on that node are force-merged one at a time. If you expand the force_merge threadpool on a node, it will force merge its shards in parallel.

Force merge temporarily increases the storage used by the shard being merged, since rewriting all segments into a new one can require free space of up to triple the shard's size when the max_num_segments parameter is set to 1.

Data streams and time-based indices

Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example:

POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
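
The equivalent call with this client (a sketch, assuming an `AsyncElasticsearch` instance named `client`):

    resp = await client.indices.forcemerge(
        index=".ds-my-data-stream-2099.03.07-000001",
        max_num_segments=1,
    )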
          
``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param flush: Specify whether the index should be flushed after performing the operation (default: true) :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param max_num_segments: The number of segments the index should be merged into (default: dynamic) :param only_expunge_deletes: Specify whether the operation should only expunge deleted documents :param wait_for_completion: Should the request wait until the force merge is completed. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_forcemerge' else: __path_parts = {} __path = "/_forcemerge" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flush is not None: __query["flush"] = flush if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if max_num_segments is not None: __query["max_num_segments"] = max_num_segments if only_expunge_deletes is not None: __query["only_expunge_deletes"] = only_expunge_deletes if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.forcemerge", path_parts=__path_parts, ) @_rewrite_parameters() async def get( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, features: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["aliases", "mappings", "settings"]]], t.Union[str, t.Literal["aliases", "mappings", "settings"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices.
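
For example, a sketch limited to mappings and settings (the index name is hypothetical):

    resp = await client.indices.get(index="my-index", features=["mappings", "settings"])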

``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param expand_wildcards: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden. :param features: Return only information on specified index features :param flat_settings: If true, returns settings in flat format. :param ignore_unavailable: If false, requests that target a missing index return an error. :param include_defaults: If true, return all default settings in the response. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if features is not None: __query["features"] = features if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get", path_parts=__path_parts, ) @_rewrite_parameters() async def get_alias( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get aliases. Retrieves information for one or more data stream or index aliases.

``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param name: Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_alias' elif name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_alias/{__path_parts["name"]}' else: __path_parts = {} __path = "/_alias" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_alias", path_parts=__path_parts, ) @_rewrite_parameters() async def get_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream lifecycles.

Get the data stream lifecycle configuration of one or more data streams.
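
For example (the data stream name is hypothetical):

    resp = await client.indices.get_data_lifecycle(name="my-data-stream", include_defaults=True)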

``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param include_defaults: If `true`, return all default settings in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def get_data_lifecycle_stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_lifecycle/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_lifecycle_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_data_stream( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data streams.

Get information about one or more data streams.
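
For example, a sketch that lists every data stream (wildcard expressions are supported):

    resp = await client.indices.get_data_stream(name="*")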

``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param include_defaults: If true, returns all relevant default configurations for the index template. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param verbose: Whether the maximum timestamp for each data stream should be calculated and returned. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}' else: __path_parts = {} __path = "/_data_stream" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_stream", path_parts=__path_parts, ) @_rewrite_parameters() async def get_data_stream_options( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream options.

Get the data stream options configuration of one or more data streams.

``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_options' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_stream_options", path_parts=__path_parts, ) @_rewrite_parameters() async def get_data_stream_settings( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream settings.

Get setting information for one or more data streams.

``_ :param name: A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_settings' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_stream_settings", path_parts=__path_parts, ) @_rewrite_parameters() async def get_field_mapping( self, *, fields: t.Union[str, t.Sequence[str]], index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices.

This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
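
For example, a sketch fetching a single field's mapping (index and field names hypothetical):

    resp = await client.indices.get_field_mapping(fields="user.id", index="my-index")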

``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. """ if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'fields'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and fields not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "fields": _quote(fields)} __path = f'/{__path_parts["index"]}/_mapping/field/{__path_parts["fields"]}' elif fields not in SKIP_IN_PATH: __path_parts = {"fields": _quote(fields)} __path = f'/_mapping/field/{__path_parts["fields"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_field_mapping", path_parts=__path_parts, ) @_rewrite_parameters() async def get_index_template( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index templates. Get information about one or more index templates.

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. :param flat_settings: If true, returns settings in flat format. :param include_defaults: If true, returns all relevant default configurations for the index template. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' else: __path_parts = {} __path = "/_index_template" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_index_template", path_parts=__path_parts, ) @_rewrite_parameters() async def get_mapping( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices.
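
For example, a sketch assuming a `client` variable holding an `AsyncElasticsearch` instance and an existing index named `my-index`:

  resp = await client.indices.get_mapping(index="my-index")
  print(resp["my-index"]["mappings"])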

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mapping' else: __path_parts = {} __path = "/_mapping" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_mapping", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_migrate_reindex_status( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the migration reindexing status.

Get the status of a migration reindex attempt for a data stream or index.
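
A minimal call sketch for this experimental API (assuming `client` is an `AsyncElasticsearch` instance and `my-data-stream` has a reindex migration in flight):

  resp = await client.indices.get_migrate_reindex_status(index="my-data-stream")
  print(resp.body)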

``_ :param index: The index or data stream name. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_migration/reindex/{__path_parts["index"]}/_status' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_migrate_reindex_status", path_parts=__path_parts, ) @_rewrite_parameters() async def get_settings( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices.
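
For example, a sketch that reads one group of settings by wildcard (the `client` instance and all names are assumptions):

  resp = await client.indices.get_settings(
      index="my-index",
      name="index.number_of_*",
      include_defaults=True,
  )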

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param name: Comma-separated list or wildcard expression of settings to retrieve. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_settings/{__path_parts["name"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_settings' elif name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_settings/{__path_parts["name"]}' else: __path_parts = {} __path = "/_settings" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_settings", path_parts=__path_parts, ) @_rewrite_parameters() async def get_template( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get legacy index templates. Get information about one or more index templates.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
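
A minimal sketch (assuming `client` is an `AsyncElasticsearch` instance; the wildcard name is illustrative):

  resp = await client.indices.get_template(name="legacy-logs-*", flat_settings=True)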

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. :param flat_settings: If `true`, returns settings in flat format. :param local: If `true`, the request retrieves information from the local node only. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' else: __path_parts = {} __path = "/_template" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_template", path_parts=__path_parts, ) @_rewrite_parameters( body_name="reindex", ) @_stability_warning(Stability.EXPERIMENTAL) async def migrate_reindex( self, *, reindex: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reindex legacy backing indices.

Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task.
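
A hedged call sketch; the body shape shown here (`mode` and `source`) is an assumption about this experimental API rather than a contract documented in this client:

  resp = await client.indices.migrate_reindex(
      reindex={"mode": "upgrade", "source": {"index": "my-data-stream"}},  # assumed body shape
  )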

``_ :param reindex: """ if reindex is None and body is None: raise ValueError( "Empty value passed for parameters 'reindex' and 'body', one of them should be set." ) elif reindex is not None and body is not None: raise ValueError("Cannot set both 'reindex' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_migration/reindex" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = reindex if reindex is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.migrate_reindex", path_parts=__path_parts, ) @_rewrite_parameters() async def migrate_to_data_stream( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Convert an index alias to a data stream. Converts an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria:

  • The alias must have a write index.
  • All indices for the alias must have a `@timestamp` field mapping of a `date` or `date_nanos` field type.
  • The alias must not have any filters.
  • The alias must not use custom routing.

If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream.
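
For example (assuming `client` is an `AsyncElasticsearch` instance and `my-logs-alias` satisfies the criteria above):

  resp = await client.indices.migrate_to_data_stream(name="my-logs-alias")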

``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/_migrate/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.migrate_to_data_stream", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("actions",), ) async def modify_data_stream( self, *, actions: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data streams. Performs one or more data stream modification actions in a single atomic operation.
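
A sketch that atomically swaps a backing index; the stream and index names are illustrative, and `remove_backing_index`/`add_backing_index` are the action types documented for the Elasticsearch modify data stream API:

  resp = await client.indices.modify_data_stream(
      actions=[
          # remove one backing index and add another in a single atomic operation
          {"remove_backing_index": {"data_stream": "my-stream", "index": ".ds-my-stream-000001"}},
          {"add_backing_index": {"data_stream": "my-stream", "index": "restored-index"}},
      ],
  )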

``_ :param actions: Actions to perform. """ if actions is None and body is None: raise ValueError("Empty value passed for parameter 'actions'") __path_parts: t.Dict[str, str] = {} __path = "/_data_stream/_modify" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if actions is not None: __body["actions"] = actions __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.modify_data_stream", path_parts=__path_parts, ) @_rewrite_parameters() async def open( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Open a closed index. For data streams, the API opens any closed backing indices.

A closed index is blocked for read/write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Because closed indices do not have to maintain internal data structures for indexing or searching documents, they impose a smaller overhead on the cluster.

When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the `ignore_unavailable=true` parameter.

By default, you must explicitly name the indices you are opening or closing. To open or close indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. This setting can also be changed with the cluster update settings API.

Closed indices consume a significant amount of disk space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting `cluster.indices.close.enable` to false.

Because opening or closing an index allocates its shards, the `wait_for_active_shards` setting on index creation applies to the `_open` and `_close` index actions as well.
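
A minimal sketch (assuming `client` is an `AsyncElasticsearch` instance and `my-index` is currently closed):

  resp = await client.indices.open(index="my-index", wait_for_active_shards="all")
  print(resp["acknowledged"], resp["shards_acknowledged"])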

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you are using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_open' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.open", path_parts=__path_parts, ) @_rewrite_parameters() async def promote_data_stream( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Promote a data stream. Promote a data stream from a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.

With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These data streams can't be rolled over in the local cluster. These replicated data streams roll over only if the upstream data stream rolls over. In the event that the remote cluster is no longer available, the data stream in the local cluster can be promoted to a regular data stream, which allows these data streams to be rolled over in the local cluster.

NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.
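
For example (the stream name is illustrative; `client` is assumed to be an `AsyncElasticsearch` instance):

  resp = await client.indices.promote_data_stream(name="my-replicated-stream")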

``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/_promote/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.promote_data_stream", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "filter", "index_routing", "is_write_index", "routing", "search_routing", ), ) async def put_alias( self, *, index: t.Union[str, t.Sequence[str]], name: str, error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_routing: t.Optional[str] = None, is_write_index: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, search_routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an alias. Adds a data stream or index to an alias.
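
A sketch that adds a filtered write alias (all names and the filter query are illustrative):

  resp = await client.indices.put_alias(
      index="my-index-000001",
      name="my-alias",
      is_write_index=True,
      filter={"term": {"user.id": "kimchy"}},
  )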

``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. :param name: Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. :param filter: Query used to limit documents the alias can access. :param index_routing: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. :param is_write_index: If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param routing: Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. :param search_routing: Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if filter is not None: __body["filter"] = filter if index_routing is not None: __body["index_routing"] = index_routing if is_write_index is not None: __body["is_write_index"] = is_write_index if routing is not None: __body["routing"] = routing if search_routing is not None: __body["search_routing"] = search_routing if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_alias", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("data_retention", "downsampling", "enabled"), ) async def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, downsampling: t.Optional[t.Mapping[str, t.Any]] = None, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, 
t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data stream lifecycles. Update the data stream lifecycle of the specified data streams.
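
For example, a sketch that sets a retention period on matching streams (the name pattern and duration are illustrative):

  resp = await client.indices.put_data_lifecycle(
      name="my-stream*",
      data_retention="7d",
  )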

``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. :param data_retention: If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. :param downsampling: The downsampling configuration to execute for the managed backing index after rollover. :param enabled: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if data_retention is not None: __body["data_retention"] = data_retention if downsampling is not None: __body["downsampling"] = downsampling if enabled is not None: __body["enabled"] = enabled if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("failure_store",), ) async def put_data_stream_options( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, failure_store: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data stream options. Update the data stream options of the specified data streams.
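
A hedged sketch; the `enabled` flag inside `failure_store` is an assumed body shape rather than a contract documented here:

  resp = await client.indices.put_data_stream_options(
      name="my-stream",
      failure_store={"enabled": True},  # assumed failure store configuration shape
  )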

``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param failure_store: If defined, it will update the failure store configuration of every data stream resolved by the name expression. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_options' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if failure_store is not None: __body["failure_store"] = failure_store if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_data_stream_options", path_parts=__path_parts, ) @_rewrite_parameters( body_name="settings", ) async def put_data_stream_settings( self, *, name: t.Union[str, t.Sequence[str]], settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data stream settings.

This API can be used to override settings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state, only certain settings are allowed. If possible, the setting change is applied to all backing indices. Otherwise, it will be applied when the data stream is next rolled over.
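
A minimal sketch; `index.number_of_replicas` is assumed to be one of the allowed override settings, so verify it against the allowed-settings list before relying on it:

  resp = await client.indices.put_data_stream_settings(
      name="my-stream",
      settings={"index.number_of_replicas": 1},
      dry_run=True,  # simulate the change and report what would happen
  )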

``_ :param name: A comma-separated list of data streams or data stream patterns. :param settings: :param dry_run: If `true`, the request does not actually change the settings on any data streams or indices. Instead, it simulates changing the settings and reports back to the user what would have happened had these settings actually been applied. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if settings is None and body is None: raise ValueError( "Empty value passed for parameters 'settings' and 'body', one of them should be set." ) elif settings is not None and body is not None: raise ValueError("Cannot set both 'settings' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_settings' __query: t.Dict[str, t.Any] = {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = settings if settings is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_data_stream_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_auto_create", "composed_of", "data_stream", "deprecated", "ignore_missing_component_templates", "index_patterns", "meta", "priority", "template", "version", ), parameter_aliases={"_meta": "meta"}, ) async def put_index_template( self, *, name: str, allow_auto_create: t.Optional[bool] = None, cause: t.Optional[str] = None, composed_of: t.Optional[t.Sequence[str]] = None, create: t.Optional[bool] = None, data_stream: t.Optional[t.Mapping[str, t.Any]] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_missing_component_templates: t.Optional[t.Sequence[str]] = None, index_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, priority: t.Optional[int] = None, template: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.

You can use C-style `/* */` block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket.

Multiple matching templates

If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.

Multiple templates with overlapping index patterns at the same priority are not allowed; attempting to create a template that matches an existing index template at the same priority results in an error.

Composing aliases, mappings, and settings

When multiple component templates are specified in the `composed_of` field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also to root options like `dynamic_templates` and `_meta`. If an earlier component contains a `dynamic_templates` block, then by default new `dynamic_templates` entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition.
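
For example, a sketch of a data stream template (all names, the pattern, and the priority are illustrative):

  resp = await client.indices.put_index_template(
      name="logs-template",
      index_patterns=["logs-*"],
      data_stream={},  # an empty object enables data stream creation
      priority=200,
      template={"settings": {"number_of_replicas": 1}},
  )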

``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. :param cause: User defined reason for creating/updating the index template :param composed_of: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. :param create: If `true`, this request cannot replace or update existing index templates. :param data_stream: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. :param deprecated: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. :param ignore_missing_component_templates: The configuration option `ignore_missing_component_templates` can be used when an index template references a component template that might not exist. :param index_patterns: Array of wildcard expressions used to match the names of indices and data streams during creation. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one.
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if allow_auto_create is not None: __body["allow_auto_create"] = allow_auto_create if composed_of is not None: __body["composed_of"] = composed_of if data_stream is not None: __body["data_stream"] = data_stream if deprecated is not None: __body["deprecated"] = deprecated if ignore_missing_component_templates is not None: __body["ignore_missing_component_templates"] = ( ignore_missing_component_templates ) if index_patterns is not None: __body["index_patterns"] = index_patterns if meta is not None: __body["_meta"] = meta if priority is not None: __body["priority"] = priority if template is not None: __body["template"] = template if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_index_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "date_detection", "dynamic", "dynamic_date_formats", "dynamic_templates", "field_names", "meta", "numeric_detection", "properties", "routing", "runtime", "source", ), parameter_aliases={ "_field_names": "field_names", "_meta": "meta", "_routing": "routing", "_source": "source", }, ) async def put_mapping( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, date_detection: t.Optional[bool] = None, dynamic: t.Optional[ t.Union[str, t.Literal["false", "runtime", "strict", "true"]] ] = None, dynamic_date_formats: t.Optional[t.Sequence[str]] = None, dynamic_templates: t.Optional[ t.Sequence[t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, field_names: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, numeric_detection: t.Optional[bool] = None, pretty: t.Optional[bool] = None, properties: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, routing: t.Optional[t.Mapping[str, t.Any]] = None, runtime: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, write_index_only: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update field mappings. Add new fields to an existing data stream or index. You can use the update mapping API to:

  • Add a new field to an existing index
  • Update mappings for multiple indices in a single request
  • Add new properties to an object field
  • Enable multi-fields for an existing field
  • Update supported mapping parameters
  • Change a field's mapping using reindexing
  • Rename a field using a field alias

Learn how to use the update mapping API with practical examples in the Update mapping API examples guide.
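
A minimal sketch that adds one new field (the index and field names are illustrative):

  resp = await client.indices.put_mapping(
      index="my-index",
      properties={"user_id": {"type": "keyword"}},
  )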

``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param date_detection: Controls whether dynamic date detection is enabled. :param dynamic: Controls whether new fields are added dynamically. :param dynamic_date_formats: If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. :param dynamic_templates: Specify dynamic templates for the mapping. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param field_names: Control whether field names are enabled for the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. :param numeric_detection: Automatically map strings into numeric data types for all fields. :param properties: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters :param routing: Enable making a routing value required on indexed documents. :param runtime: Mapping of runtime fields for the index. :param source: Control whether the _source field is enabled on the index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param write_index_only: If `true`, the mappings are applied only to the current write index for the target. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mapping' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if write_index_only is not None: __query["write_index_only"] = write_index_only if not __body: if date_detection is not None: __body["date_detection"] = date_detection if dynamic is not None: __body["dynamic"] = dynamic if dynamic_date_formats is not None: __body["dynamic_date_formats"] = dynamic_date_formats if dynamic_templates is not None: __body["dynamic_templates"] = dynamic_templates if field_names is not None: __body["_field_names"] = field_names if meta is not None: __body["_meta"] = meta if numeric_detection is not None: __body["numeric_detection"] = numeric_detection if properties is not None: __body["properties"] = properties if routing is not None: __body["_routing"] = routing if runtime is not None: __body["runtime"] = runtime if source is not None: __body["_source"] = source __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_mapping", path_parts=__path_parts, ) @_rewrite_parameters( body_name="settings", ) async def put_settings( self, *, settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preserve_existing: t.Optional[bool] = None, pretty: t.Optional[bool] = None, reopen: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

To revert a setting to the default value, use a `null` value. The list of per-index settings that can be updated dynamically on live indices can be found in the index settings documentation. To preserve existing settings from being updated, set the `preserve_existing` parameter to `true`.

For performance optimization during bulk indexing, you can disable the refresh interval. Refer to disable refresh interval for an example. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

  {
    "number_of_replicas": 1
  }

Or you can use an index setting object:

  {
    "index": {
      "number_of_replicas": 1
    }
  }

Or you can use dot notation:

  {
    "index.number_of_replicas": 1
  }

Or you can embed any of the aforementioned options in a settings object. For example:

  {
    "settings": {
      "index": {
        "number_of_replicas": 1
      }
    }
  }

NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. Refer to updating analyzers on existing indices for step-by-step examples.
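
Expressed through this client, the equivalent of the request bodies above is a sketch along these lines (assuming `client` is an `AsyncElasticsearch` instance):

  resp = await client.indices.put_settings(
      index="my-index",
      settings={"index": {"number_of_replicas": 1}},
  )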

``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param preserve_existing: If `true`, existing index settings remain unchanged. :param reopen: Whether to close and reopen the index to apply non-dynamic settings. If set to `true`, the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if settings is None and body is None: raise ValueError( "Empty value passed for parameters 'settings' and 'body', one of them should be set." ) elif settings is not None and body is not None: raise ValueError("Cannot set both 'settings' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_settings' else: __path_parts = {} __path = "/_settings" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if preserve_existing is not None: __query["preserve_existing"] = preserve_existing if pretty is not None: __query["pretty"] = pretty if reopen is not None: __query["reopen"] = reopen if timeout is not None: __query["timeout"] = timeout __body = settings if settings is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aliases", "index_patterns", "mappings", "order", "settings", "version", ), ) async def put_template( self, *, name: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, cause: t.Optional[str] = None, create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path:
t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, order: t.Optional[int] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order.

Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.

You can use C-style /* */ block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket.

Indices matching multiple templates

Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index. The order of merging can be controlled using the order parameter, with lower order values being applied first and higher order values overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
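
A minimal usage sketch (illustrative, not part of the generated client), assuming an `AsyncElasticsearch` instance named `es`; the template name, pattern, and settings are placeholders:

    # Legacy template applied to any new index whose name matches "logs-*"
    await es.indices.put_template(
        name="logs-template",
        index_patterns=["logs-*"],
        order=1,
        settings={"number_of_shards": 1},
    )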

``_ :param name: The name of the template :param aliases: Aliases for the index. :param cause: User defined reason for creating/updating the index template :param create: If true, this request cannot replace or update existing index templates. :param index_patterns: Array of wildcard expressions used to match the names of indices during creation. :param mappings: Mapping for fields in the index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param order: Order in which Elasticsearch applies this template if index matches multiple templates. Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values. :param settings: Configuration options for the index. :param version: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if aliases is not None: __body["aliases"] = aliases if index_patterns is not None: __body["index_patterns"] = index_patterns if mappings is not None: __body["mappings"] = mappings if order is not None: __body["order"] = order if settings is not None: __body["settings"] = settings if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_template", path_parts=__path_parts, ) @_rewrite_parameters() async def recovery( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, active_only: t.Optional[bool] = None, allow_no_indices: t.Optional[bool] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices.

All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.

Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.

Recovery automatically occurs during the following processes:

  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.

You can determine the cause of a shard recovery using the recovery or cat recovery APIs.

The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and then Elasticsearch relocates it onto a different node then the information about the original recovery will not be shown in the recovery API.
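
A hedged sketch (assuming an `AsyncElasticsearch` instance named `es`; the index name is illustrative):

    # Report only ongoing recoveries, with per-shard detail
    resp = await es.indices.recovery(index="my-index", active_only=True, detailed=True)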

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param active_only: If `true`, the response only includes ongoing shard recoveries. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param detailed: If `true`, the response includes detailed information about shard recoveries. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_recovery' else: __path_parts = {} __path = "/_recovery" __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.recovery", path_parts=__path_parts, ) @_rewrite_parameters() async def refresh( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices.

By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the index.refresh_interval setting.

Refresh requests are synchronous and do not return a response until the refresh operation completes.

Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.

If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's refresh=wait_for query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search.
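
A brief sketch of both patterns (the client instance `es`, index name, and document are assumptions):

    # Explicit refresh (use sparingly; refreshes are resource-intensive)
    await es.indices.refresh(index="my-index")

    # Preferred for index-then-search workflows: let the indexing call wait
    await es.index(index="my-index", document={"msg": "hello"}, refresh="wait_for")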

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_refresh' else: __path_parts = {} __path = "/_refresh" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.refresh", path_parts=__path_parts, ) @_rewrite_parameters() async def reload_search_analyzers( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, resource: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices.

IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.

You can use the reload search analyzers API to pick up changes to synonym files used in the synonym_graph or synonym token filter of a search analyzer. To be eligible, the token filter must have an updateable flag of true and only be used in search analyzers.

NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
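
A minimal sketch, assuming `es` is an `AsyncElasticsearch` instance and `my-index` uses a search analyzer with an `updateable` synonym filter:

    # Pick up edits to the synonym file after updating it on every data node
    resp = await es.indices.reload_search_analyzers(index="my-index")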

``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param resource: Changed resource to reload analyzers from if applicable """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_reload_search_analyzers' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if resource is not None: __query["resource"] = resource __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.reload_search_analyzers", path_parts=__path_parts, ) @_rewrite_parameters() async def remove_block( self, *, index: str, block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Remove an index block.

Remove an index block from an index. Index blocks limit the operations allowed on an index by blocking specific operation types.
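
For example (a sketch; `es` and the index name are assumptions):

    # Lift a write block that was previously added to the index
    await es.indices.remove_block(index="my-index", block="write")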

``_ :param index: A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are removing blocks from. To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. :param block: The block type to remove from the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if block in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'block'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "block": _quote(block), } __path = f'/{__path_parts["index"]}/_block/{__path_parts["block"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.remove_block", path_parts=__path_parts, ) @_rewrite_parameters() async def resolve_cluster( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resolve the cluster.

Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.

This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint.

For each cluster in the index expression, information is returned about:

  • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.

For example, GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns information about the local cluster and all remotely configured clusters that start with the alias cluster*. Each cluster returns information about whether it has any indices, aliases or data streams that match my-index-*.

Note on backwards compatibility

The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression dummy* to those remote clusters. Thus, if any errors occur, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like *:* to bypass the issue.

Advantages of using this endpoint before a cross-cluster search

You may want to exclude a cluster or index from a search when:

  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases, or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases, or data streams that match logs*. In that case, that cluster will return no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.

Test availability of remote clusters

The remote/info endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it.

You can use the _resolve/cluster API to attempt to reconnect to remote clusters. For example with GET _resolve/cluster or GET _resolve/cluster/*:*. The connected field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the remote/info endpoint to now indicate a connected status.
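
A hedged sketch (assuming an `AsyncElasticsearch` instance named `es`; the index expression is illustrative):

    # Check local and remote clusters before a cross-cluster search;
    # the response maps each cluster alias to its connection status,
    # skip_unavailable setting, and whether it has matching indices.
    resp = await es.indices.resolve_cluster(name="my-index-*,cluster*:my-index-*")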

``_ :param name: A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster is returned without doing any index matching :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param ignore_unavailable: If false, the request returns an error if it targets a missing or closed index. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param timeout: The maximum time to wait for remote clusters to respond. If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds). 
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_resolve/cluster/{__path_parts["name"]}' else: __path_parts = {} __path = "/_resolve/cluster" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.resolve_cluster", path_parts=__path_parts, ) @_rewrite_parameters() async def resolve_index( self, *, name: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.
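
For instance (a sketch; `es` and the pattern are assumptions):

    # Resolve a wildcard pattern to concrete indices, aliases, and data streams
    resp = await es.indices.resolve_index(name="my-index-*", expand_wildcards="open")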

``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_resolve/index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.resolve_index", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "conditions", "mappings", "settings"), ) async def rollover( self, *, alias: str, new_index: t.Optional[str] = None, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, conditions: t.Optional[t.Mapping[str, t.Any]] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, lazy: t.Optional[bool] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.

The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target.

Roll over a data stream

If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation.

Roll over an index alias with a write index

TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.

If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with is_write_index set to true. The API also sets is_write_index to false for the previous write index.

Roll over an index alias with one index

If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.

NOTE: A rollover creates a new index and is subject to the wait_for_active_shards setting.

Increment index names for an alias

When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with - and a number, such as my-index-000001 or my-index-3, the new index name increments that number. For example, if you roll over an alias with a current index of my-index-000001, the rollover creates a new index named my-index-000002. This number is always six characters and zero-padded, regardless of the previous index's name.

If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named <my-index-{now/d}-000001>. If you create the index on May 6, 2099, the index's name is my-index-2099.05.06-000001. If you roll over the alias on May 7, 2099, the new index's name is my-index-2099.05.07-000002.
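
A minimal sketch of a conditional rollover (assuming `es` is an `AsyncElasticsearch` instance and the alias already exists):

    # Roll over only if the current write index is old enough or large enough
    resp = await es.indices.rollover(
        alias="my-alias",
        conditions={"max_age": "7d", "max_primary_shard_size": "50gb"},
    )
    print(resp["rolled_over"], resp["new_index"])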

``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams do not support this parameter. :param aliases: Aliases for the target index. Data streams do not support this parameter. :param conditions: Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will rollover if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. :param dry_run: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. :param lazy: If set to `true`, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams. :param mappings: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the index. Data streams do not support this parameter. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if alias in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'alias'") __path_parts: t.Dict[str, str] if alias not in SKIP_IN_PATH and new_index not in SKIP_IN_PATH: __path_parts = {"alias": _quote(alias), "new_index": _quote(new_index)} __path = f'/{__path_parts["alias"]}/_rollover/{__path_parts["new_index"]}' elif alias not in SKIP_IN_PATH: __path_parts = {"alias": _quote(alias)} __path = f'/{__path_parts["alias"]}/_rollover' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if lazy is not None: __query["lazy"] = lazy if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if conditions is not None: __body["conditions"] = conditions if mappings is not None: __body["mappings"] = mappings if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.rollover", path_parts=__path_parts, ) @_rewrite_parameters() 
async def segments( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
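
For example (a sketch; `es` and the index name are assumptions):

    # Lucene segment details for each shard of the index
    resp = await es.indices.segments(index="my-index")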

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_segments' else: __path_parts = {} __path = "/_segments" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.segments", path_parts=__path_parts, ) @_rewrite_parameters() async def shard_stores( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, status: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["all", "green", "red", "yellow"]]], t.Union[str, t.Literal["all", "green", "red", "yellow"]], ] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices.

The index shard stores API returns the following information:

  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.

By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
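
A short sketch (assuming `es` is an `AsyncElasticsearch` instance):

    # Store information for shards in indices with yellow or red health
    resp = await es.indices.shard_stores(index="my-index", status=["yellow", "red"])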

``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param status: List of shard health statuses used to limit the request. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_shard_stores' else: __path_parts = {} __path = "/_shard_stores" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if status is not None: __query["status"] = status __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.shard_stores", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "settings"), ) async def shrink( self, *, index: str, target: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Shrink an index. Shrink an index into a new index with fewer primary shards.

Before you can shrink an index:

  • The index must be read-only.
  • A copy of every shard in the index must reside on the same node.
  • The index must have a green health status.

To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation.
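
Putting the preparation and the shrink call together, a hedged sketch (assuming `es` is an `AsyncElasticsearch` instance; the index names, node name, and shard counts are placeholders):

    # Prepare: drop replicas, gather all shards on one node, block writes
    await es.indices.put_settings(
        index="my-index",
        settings={
            "index.number_of_replicas": 0,
            "index.routing.allocation.require._name": "shrink-node",
            "index.blocks.write": True,
        },
    )
    # Shrink to one primary shard, restoring a replica on the target
    await es.indices.shrink(
        index="my-index",
        target="my-shrunk-index",
        settings={"index.number_of_shards": 1, "index.number_of_replicas": 1},
    )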

The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.

The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.

A shrink operation:

  • Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process. Also, if you use multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
  • Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the .routing.allocation.initial_recovery._id index setting.

IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:

  • The target index must not exist.
  • The source index must have more primary shards than the target index.
  • The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
  • The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.
  • The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. :param aliases: The key is the alias name. Index alias names support date math. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the target index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target": _quote(target), } __path = f'/{__path_parts["index"]}/_shrink/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.shrink", path_parts=__path_parts, ) @_rewrite_parameters() async def simulate_index_template( self, *, name: str, cause: t.Optional[str] = None, create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template.
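
For example (a sketch; `es` and the index name are assumptions):

    # Preview the configuration that existing templates would apply to this name
    resp = await es.indices.simulate_index_template(name="my-new-index")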

``_ :param name: Name of the index to simulate :param cause: User defined reason for dry-run creating the new template for simulation purposes :param create: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one :param include_defaults: If true, returns all relevant default configurations for the index template. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/_simulate_index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.simulate_index_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_auto_create", "composed_of", "data_stream", "deprecated", "ignore_missing_component_templates", "index_patterns", "meta", "priority", "template", "version", ), parameter_aliases={"_meta": "meta"}, ) async def simulate_template( self, *, name: t.Optional[str] = None, allow_auto_create: t.Optional[bool] = None, cause: t.Optional[str] = None, composed_of: t.Optional[t.Sequence[str]] = None, create: t.Optional[bool] = None, data_stream: t.Optional[t.Mapping[str, t.Any]] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_missing_component_templates: t.Optional[t.Sequence[str]] = None, include_defaults: t.Optional[bool] = None, index_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, priority: t.Optional[int] = None, template: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate an index template. Get the index configuration that would be applied by a particular index template.
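
A minimal sketch simulating a template defined inline (assuming `es` is an `AsyncElasticsearch` instance; the pattern and settings are placeholders):

    # Preview the effective configuration without storing the template
    resp = await es.indices.simulate_template(
        index_patterns=["my-*"],
        template={"settings": {"number_of_shards": 2}},
        priority=10,
    )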

``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. :param cause: User defined reason for dry-run creating the new template for simulation purposes :param composed_of: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. :param create: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. :param data_stream: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. :param deprecated: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. :param ignore_missing_component_templates: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist :param include_defaults: If true, returns all relevant default configurations for the index template. :param index_patterns: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. 
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_index_template/_simulate/{__path_parts["name"]}' else: __path_parts = {} __path = "/_index_template/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if allow_auto_create is not None: __body["allow_auto_create"] = allow_auto_create if composed_of is not None: __body["composed_of"] = composed_of if data_stream is not None: __body["data_stream"] = data_stream if deprecated is not None: __body["deprecated"] = deprecated if ignore_missing_component_templates is not None: __body["ignore_missing_component_templates"] = ( ignore_missing_component_templates ) if index_patterns is not None: __body["index_patterns"] = index_patterns if meta is not None: __body["_meta"] = meta if priority is not None: __body["priority"] = priority if template is not None: __body["template"] = template if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.simulate_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "settings"), ) async def split( self, *, index: str, target: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Split an index. Split an index into a new index with more primary shards.

Before you can split an index:

  • The index must be read-only.
  • The cluster health status must be green.

You can make an index read-only with the following request using the add index block API:

PUT /my_source_index/_block/write

The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.

The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the index.number_of_routing_shards setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with number_of_routing_shards set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.
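A small sketch of the split-factor rule just described; the helper is illustrative and not part of this client:

# Illustrative: a target shard count is valid when it is a multiple of the
# source shard count and divides index.number_of_routing_shards evenly.
def valid_split_targets(source_shards: int, routing_shards: int) -> list[int]:
    return [
        n
        for n in range(source_shards + 1, routing_shards + 1)
        if n % source_shards == 0 and routing_shards % n == 0
    ]

print(valid_split_targets(5, 30))  # [10, 15, 30]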

A split operation:

  • Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
  • Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
  • Recovers the target index as though it were a closed index which had just been re-opened.

IMPORTANT: Indices can only be split if they satisfy the following requirements:

  • The target index must not exist.
  • The source index must have fewer primary shards than the target index.
  • The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
  • The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
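Putting the requirements above together, a hedged end-to-end sketch (index names and shard count are illustrative):

# Illustrative: block writes on the source, then split it into 10 primary shards.
await client.indices.add_block(index="my_source_index", block="write")
await client.indices.split(
    index="my_source_index",
    target="my_target_index",
    settings={"index.number_of_shards": 10},
)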
``_ :param index: Name of the source index to split. :param target: Name of the target index to create. :param aliases: Aliases for the resulting index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the target index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target": _quote(target), } __path = f'/{__path_parts["index"]}/_split/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.split", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, forbid_closed_indices: t.Optional[bool] = None, groups: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_segment_file_sizes: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices.

By default, the returned statistics are index-level with `primaries` and `total` aggregations. `primaries` are the values for only the primary shards. `total` are the accumulated values for both primary and replica shards.

To get shard-level statistics, set the `level` parameter to `shards`.

NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
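A minimal sketch of requesting shard-level statistics, assuming an `AsyncElasticsearch` instance named `client`; the index, metrics, and response path are illustrative:

# Illustrative: restrict the response to docs and store metrics, per shard.
resp = await client.indices.stats(index="my-index", metric="docs,store", level="shards")
print(resp["_all"]["primaries"]["docs"]["count"])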

``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :param metric: Limit the information returned the specific metrics. :param completion_fields: Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fielddata_fields: Comma-separated list or wildcard expressions of fields to include in fielddata statistics. :param fields: Comma-separated list or wildcard expressions of fields to include in the statistics. :param forbid_closed_indices: If true, statistics are not collected from closed indices. :param groups: Comma-separated list of search groups to include in the search statistics. :param include_segment_file_sizes: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. :param level: Indicates whether statistics are aggregated at the cluster, index, or shard level. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "metric": _quote(metric)} __path = f'/{__path_parts["index"]}/_stats/{__path_parts["metric"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_stats' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_stats/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_stats" __query: t.Dict[str, t.Any] = {} if completion_fields is not None: __query["completion_fields"] = completion_fields if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if fielddata_fields is not None: __query["fielddata_fields"] = fielddata_fields if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if forbid_closed_indices is not None: __query["forbid_closed_indices"] = forbid_closed_indices if groups is not None: __query["groups"] = groups if human is not None: __query["human"] = human if include_segment_file_sizes is not None: __query["include_segment_file_sizes"] = include_segment_file_sizes if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if level is not None: __query["level"] = level if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("actions",), ) async def update_aliases( self, *, actions: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) 
-> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an alias. Adds a data stream or index to an alias.
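Because the actions in a single request are applied atomically, this API is the usual way to swing an alias from one index to another. A hedged sketch (names are illustrative):

# Illustrative: atomically move my-alias between two indices.
await client.indices.update_aliases(
    actions=[
        {"remove": {"index": "my-index-000001", "alias": "my-alias"}},
        {"add": {"index": "my-index-000002", "alias": "my-alias"}},
    ]
)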

``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_aliases" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if actions is not None: __body["actions"] = actions __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.update_aliases", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query",), ) async def validate_query( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, all_shards: t.Optional[bool] = None, allow_no_indices: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, lenient: t.Optional[bool] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, rewrite: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Validate a query. Validates a query without running it.
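A minimal sketch, with an illustrative index and query:

# Illustrative: check that the query compiles without executing it.
resp = await client.indices.validate_query(
    index="my-index",
    query={"match": {"user.id": "kimchy"}},
    explain=True,
)
print(resp["valid"])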

``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. :param all_shards: If `true`, the validation is executed on all shards instead of one random shard per index. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. :param analyzer: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or `OR`. :param df: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param explain: If `true`, the response returns detailed information if an error has occurred. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. :param q: Query in the Lucene query string syntax. :param query: Query to validate, defined using the Query DSL. :param rewrite: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_validate/query' else: __path_parts = {} __path = "/_validate/query" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if all_shards is not None: __query["all_shards"] = all_shards if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if rewrite is not None: __query["rewrite"] = rewrite if not __body: if query is not None: __body["query"] = query if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.validate_query", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/inference.py000066400000000000000000003637741506101734100255510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters( body_fields=("input", "task_settings"), ) async def completion( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform completion inference on the service
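A minimal sketch, assuming a completion endpoint with the ID shown already exists:

# Illustrative: run completion inference against an existing endpoint.
resp = await client.inference.completion(
    inference_id="my-completion-endpoint",
    input="What is Elasticsearch?",
)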

``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. :param task_settings: Optional task settings :param timeout: Specifies the amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/completion/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.completion", path_parts=__path_parts, ) @_rewrite_parameters() async def delete( self, *, inference_id: str, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an inference endpoint
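A hedged sketch of a safe deletion flow (the endpoint ID is illustrative):

# Illustrative: dry_run returns the ingest processors that still reference the
# endpoint; drop dry_run (or pass force=True) to actually delete it.
await client.inference.delete(inference_id="my-endpoint", dry_run=True)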

``_ :param inference_id: The inference identifier. :param task_type: The task type :param dry_run: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. :param force: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="inference.delete", path_parts=__path_parts, ) @_rewrite_parameters() async def get( self, *, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, inference_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get an inference endpoint
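A minimal sketch; both parameters are optional, and the response shape shown is an assumption:

# Illustrative: omit both arguments to list every inference endpoint.
resp = await client.inference.get()
for endpoint in resp["endpoints"]:
    print(endpoint["inference_id"])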

``_ :param task_type: The task type :param inference_id: The inference Id """ __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: __path_parts = {} __path = "/_inference" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="inference.get", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "input_type", "query", "task_settings"), ) async def inference( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, input_type: t.Optional[str] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform inference on the service.

This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.

NOTE: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
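A hedged sketch of a rerank request, assuming a rerank endpoint with the ID shown already exists (query and documents are illustrative):

# Illustrative: score documents against the query with a rerank endpoint.
resp = await client.inference.inference(
    task_type="rerank",
    inference_id="my-rerank-endpoint",
    query="best pizza in town",
    input=["Pizzeria Uno", "Thai Palace", "Napoli Pizza"],
)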

``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. :param task_type: The type of inference task that the model performs. :param input_type: Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` * `CLUSTERING` Not all services support all values. Unsupported values will trigger a validation exception. Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info. > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. :param query: The query input, which is required only for the `rerank` task. It is not required for other tasks. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. :param timeout: The amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if input_type is not None: __body["input_type"] = input_type if query is not None: __body["query"] = query if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.inference", path_parts=__path_parts, ) @_rewrite_parameters( body_name="inference_config", ) async def put( self, *, inference_id: str, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] 
= None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an inference endpoint.

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

The following integrations are available through the inference API. You can find the available task types next to the integration name:

  • AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
  • Amazon Bedrock (completion, text_embedding)
  • Amazon SageMaker (chat_completion, completion, rerank, sparse_embedding, text_embedding)
  • Anthropic (completion)
  • Azure AI Studio (completion, text_embedding)
  • Azure OpenAI (completion, text_embedding)
  • Cohere (completion, rerank, text_embedding)
  • DeepSeek (chat_completion, completion)
  • Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
  • ELSER (sparse_embedding)
  • Google AI Studio (completion, text_embedding)
  • Google Vertex AI (chat_completion, completion, rerank, text_embedding)
  • Hugging Face (chat_completion, completion, rerank, text_embedding)
  • JinaAI (rerank, text_embedding)
  • Llama (chat_completion, completion, text_embedding)
  • Mistral (chat_completion, completion, text_embedding)
  • OpenAI (chat_completion, completion, text_embedding)
  • VoyageAI (rerank, text_embedding)
  • Watsonx inference integration (text_embedding)
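A hedged sketch of creating an endpoint through this generic API; the service choice, model, and API key are illustrative placeholders:

# Illustrative: create an OpenAI text_embedding endpoint.
await client.inference.put(
    task_type="text_embedding",
    inference_id="my-openai-embeddings",
    inference_config={
        "service": "openai",
        "service_settings": {
            "api_key": "<api-key>",
            "model_id": "text-embedding-3-small",
        },
    },
)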
``_ :param inference_id: The inference Id :param inference_config: :param task_type: The task type. Refer to the integration list in the API description for the available task types. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if inference_config is None and body is None: raise ValueError( "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." ) elif inference_config is not None and body is not None: raise ValueError("Cannot set both 'inference_config' and 'body'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_alibabacloud( self, *, task_type: t.Union[ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"] ], alibabacloud_inference_id: str, service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an AlibabaCloud AI Search inference endpoint.

Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.

``_ :param task_type: The type of the inference task that the model will perform. :param alibabacloud_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. :param service_settings: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if alibabacloud_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'alibabacloud_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "alibabacloud_inference_id": _quote(alibabacloud_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_alibabacloud", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_amazonbedrock( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], amazonbedrock_inference_id: str, service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Amazon Bedrock inference endpoint.

Create an inference endpoint to perform an inference task with the amazonbedrock service.

NOTE: You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.

``_ :param task_type: The type of the inference task that the model will perform. :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `amazonbedrock`. :param service_settings: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if amazonbedrock_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'amazonbedrock_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_amazonbedrock", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_amazonsagemaker( self, *, task_type: t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ], amazonsagemaker_inference_id: str, service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Amazon SageMaker inference endpoint.

Create an inference endpoint to perform an inference task with the amazon_sagemaker service.

``_ :param task_type: The type of the inference task that the model will perform. :param amazonsagemaker_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `amazon_sagemaker`. :param service_settings: Settings used to install the inference model. These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type and `service_settings.api` you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if amazonsagemaker_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'amazonsagemaker_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_amazonsagemaker", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_anthropic( self, *, task_type: t.Union[str, t.Literal["completion"]], anthropic_inference_id: str, service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Anthropic inference endpoint.

Create an inference endpoint to perform an inference task with the anthropic service.

``_ :param task_type: The task type. The only valid task type for the model to perform is `completion`. :param anthropic_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `anthropic`. :param service_settings: Settings used to install the inference model. These settings are specific to the `anthropic` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if anthropic_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'anthropic_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "anthropic_inference_id": _quote(anthropic_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_anthropic", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_azureaistudio( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], azureaistudio_inference_id: str, service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Azure AI studio inference endpoint.

Create an inference endpoint to perform an inference task with the azureaistudio service.

``_ :param task_type: The type of the inference task that the model will perform. :param azureaistudio_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `azureaistudio`. :param service_settings: Settings used to install the inference model. These settings are specific to the `azureaistudio` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if azureaistudio_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'azureaistudio_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "azureaistudio_inference_id": _quote(azureaistudio_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_azureaistudio", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_azureopenai( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], azureopenai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Azure OpenAI inference endpoint.

Create an inference endpoint to perform an inference task with the azureopenai service.

The lists of chat completion and embeddings models that you can choose from in your Azure OpenAI deployment can be found in the Azure models documentation.

``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. :param azureopenai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `azureopenai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `azureopenai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if azureopenai_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'azureopenai_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "azureopenai_inference_id": _quote(azureopenai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_azureopenai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_cohere( self, *, task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]], cohere_inference_id: str, service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Cohere inference endpoint.

Create an inference endpoint to perform an inference task with the cohere service.

``_ :param task_type: The type of the inference task that the model will perform. :param cohere_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `cohere`. :param service_settings: Settings used to install the inference model. These settings are specific to the `cohere` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if cohere_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'cohere_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "cohere_inference_id": _quote(cohere_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_cohere", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_custom( self, *, task_type: t.Union[ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"] ], custom_inference_id: str, service: t.Optional[t.Union[str, t.Literal["custom"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a custom inference endpoint.

The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations. The custom service gives you the ability to define the headers, url, query parameters, request body, and secrets. The custom service supports the template replacement functionality, which enables you to define a template that can be replaced with the value associated with that key. Templates are portions of a string that start with ${ and end with }. The parameters secret_parameters and task_settings are checked for keys for template replacement. Template replacement is supported in the request, headers, url, and query_parameters. If the definition (key) is not found for a template, an error message is returned. In case of an endpoint definition like the following:

PUT _inference/text_embedding/test-text-embedding
          {
            "service": "custom",
            "service_settings": {
               "secret_parameters": {
                    "api_key": "<some api key>"
               },
               "url": "...endpoints.huggingface.cloud/v1/embeddings",
               "headers": {
                   "Authorization": "Bearer ${api_key}",
                   "Content-Type": "application/json"
               },
               "request": "{\\"input\\": ${input}}",
               "response": {
                   "json_parser": {
                       "text_embeddings":"$.data[*].embedding[*]"
                   }
               }
            }
          }
          

To replace ${api_key}, the secret_parameters and task_settings are checked for a key named api_key.

info Templates should not be surrounded by quotes.

Pre-defined templates:

  • ${input} refers to the array of input strings that comes from the input field of the subsequent inference requests.
  • ${input_type} refers to the input type translation values.
  • ${query} refers to the query field used specifically for reranking tasks.
  • ${top_n} refers to the top_n field available when performing rerank requests.
  • ${return_documents} refers to the return_documents field available when performing rerank requests.
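
As a minimal sketch, the endpoint definition shown above could be created through this client as follows (the URL and API key are placeholders, and client is assumed to be an AsyncElasticsearch instance):

    resp = await client.inference.put_custom(
        task_type="text_embedding",
        custom_inference_id="test-text-embedding",
        service="custom",
        service_settings={
            "secret_parameters": {"api_key": "<some api key>"},
            "url": "...endpoints.huggingface.cloud/v1/embeddings",
            "headers": {
                "Authorization": "Bearer ${api_key}",
                "Content-Type": "application/json",
            },
            # The ${input} template is replaced with the inference input.
            "request": '{"input": ${input}}',
            "response": {
                "json_parser": {"text_embeddings": "$.data[*].embedding[*]"}
            },
        },
    )
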
``_ :param task_type: The type of the inference task that the model will perform. :param custom_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `custom`. :param service_settings: Settings used to install the inference model. These settings are specific to the `custom` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if custom_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'custom_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "custom_inference_id": _quote(custom_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["custom_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_custom", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) async def put_deepseek( self, *, task_type: t.Union[str, t.Literal["chat_completion", "completion"]], deepseek_inference_id: str, service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a DeepSeek inference endpoint.

Create an inference endpoint to perform an inference task with the deepseek service.

``_ :param task_type: The type of the inference task that the model will perform. :param deepseek_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `deepseek`. :param service_settings: Settings used to install the inference model. These settings are specific to the `deepseek` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if deepseek_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'deepseek_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "deepseek_inference_id": _quote(deepseek_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["deepseek_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_deepseek", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_elasticsearch( self, *, task_type: t.Union[ str, t.Literal["rerank", "sparse_embedding", "text_embedding"] ], elasticsearch_inference_id: str, service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Elasticsearch inference endpoint.

Create an inference endpoint to perform an inference task with the elasticsearch service.

info Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create endpoints using the API if you want to customize the settings.

If you use the ELSER or the E5 model through the elasticsearch service, the API request will automatically download and deploy the model if it isn't downloaded yet.

info You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
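
A minimal sketch, assuming an AsyncElasticsearch instance named client; the model id and allocation settings are illustrative, and the timeout is raised to allow for the model download:

    resp = await client.inference.put_elasticsearch(
        task_type="sparse_embedding",
        elasticsearch_inference_id="my-elser-endpoint",  # must not match the model_id
        service="elasticsearch",
        service_settings={
            # Assumed setting names and values; adjust to your deployment.
            "model_id": ".elser_model_2",
            "num_allocations": 1,
            "num_threads": 1,
        },
        timeout="5m",  # allow time for the model download
    )
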

``_ :param task_type: The type of the inference task that the model will perform. :param elasticsearch_inference_id: The unique identifier of the inference endpoint. The must not match the `model_id`. :param service: The type of service supported for the specified task type. In this case, `elasticsearch`. :param service_settings: Settings used to install the inference model. These settings are specific to the `elasticsearch` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if elasticsearch_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'elasticsearch_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "elasticsearch_inference_id": _quote(elasticsearch_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_elasticsearch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) async def put_elser( self, *, task_type: t.Union[str, t.Literal["sparse_embedding"]], elser_inference_id: str, service: t.Optional[t.Union[str, t.Literal["elser"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an ELSER inference endpoint.

Create an inference endpoint to perform an inference task with the elser service. You can also deploy ELSER by using the Elasticsearch inference integration.

info Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create the endpoint using the API if you want to customize the settings.

The API request will automatically download and deploy the ELSER model if it isn't already downloaded.

info You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
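
A minimal sketch, assuming an AsyncElasticsearch instance named client; the allocation settings are illustrative, not verified defaults:

    resp = await client.inference.put_elser(
        task_type="sparse_embedding",
        elser_inference_id="my-elser-model",
        service="elser",
        service_settings={"num_allocations": 1, "num_threads": 1},
        timeout="5m",  # allow time for the model download
    )
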

``_ :param task_type: The type of the inference task that the model will perform. :param elser_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `elser`. :param service_settings: Settings used to install the inference model. These settings are specific to the `elser` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if elser_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'elser_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "elser_inference_id": _quote(elser_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_elser", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) async def put_googleaistudio( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], googleaistudio_inference_id: str, service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Google AI Studio inference endpoint.

Create an inference endpoint to perform an inference task with the googleaistudio service.

``_ :param task_type: The type of the inference task that the model will perform. :param googleaistudio_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `googleaistudio`. :param service_settings: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if googleaistudio_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'googleaistudio_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "googleaistudio_inference_id": _quote(googleaistudio_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_googleaistudio", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_googlevertexai( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] ], googlevertexai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Google Vertex AI inference endpoint.

Create an inference endpoint to perform an inference task with the googlevertexai service.

``_ :param task_type: The type of the inference task that the model will perform. :param googlevertexai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `googlevertexai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if googlevertexai_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'googlevertexai_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "googlevertexai_inference_id": _quote(googlevertexai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_googlevertexai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_hugging_face( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] ], huggingface_inference_id: str, service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Hugging Face inference endpoint.

Create an inference endpoint to perform an inference task with the hugging_face service. Supported tasks include: text_embedding, completion, and chat_completion.

To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. Select a model that supports the task you intend to use.

For Elastic's text_embedding task: The selected model must support the Sentence Embeddings task. On the new endpoint creation page, select the Sentence Embeddings task under the Advanced Configuration section. After the endpoint has initialized, copy the generated endpoint URL. Recommended models for the text_embedding task:

  • all-MiniLM-L6-v2
  • all-MiniLM-L12-v2
  • all-mpnet-base-v2
  • e5-base-v2
  • e5-small-v2
  • multilingual-e5-base
  • multilingual-e5-small

For Elastic's chat_completion and completion tasks: The selected model must support the Text Generation task and expose the OpenAI API. Hugging Face supports both serverless and dedicated endpoints for Text Generation. When creating a dedicated endpoint, select the Text Generation task. After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and includes the /v1/chat/completions path in the URL. Then, copy the full endpoint URL for use. Recommended models for chat_completion and completion tasks:

  • Mistral-7B-Instruct-v0.2
  • QwQ-32B
  • Phi-3-mini-128k-instruct

For Elastic's rerank task: The selected model must support the sentence-ranking task and expose the OpenAI API. Hugging Face supports only dedicated (not serverless) endpoints for rerank so far. After the endpoint is initialized, copy the full endpoint URL for use. Tested models for the rerank task:

  • bge-reranker-base
  • jina-reranker-v1-turbo-en-GGUF
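
As a minimal sketch for the text_embedding case, assuming an AsyncElasticsearch instance named client, the endpoint URL copied from Hugging Face is passed through service_settings (the setting names below are assumptions, and the values are placeholders):

    resp = await client.inference.put_hugging_face(
        task_type="text_embedding",
        huggingface_inference_id="my-hf-embeddings",
        service="hugging_face",
        service_settings={
            "api_key": "<hugging face access token>",
            "url": "<endpoint URL copied from Hugging Face>",
        },
    )
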
``_ :param task_type: The type of the inference task that the model will perform. :param huggingface_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `hugging_face`. :param service_settings: Settings used to install the inference model. These settings are specific to the `hugging_face` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if huggingface_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'huggingface_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "huggingface_inference_id": _quote(huggingface_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_hugging_face", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_jinaai( self, *, task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], jinaai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a JinaAI inference endpoint.

Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.

``_ :param task_type: The type of the inference task that the model will perform. :param jinaai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `jinaai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `jinaai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if jinaai_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'jinaai_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "jinaai_inference_id": _quote(jinaai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_jinaai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) async def put_mistral( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "text_embedding"] ], mistral_inference_id: str, service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Mistral inference endpoint.

Create an inference endpoint to perform an inference task with the mistral service.

``_ :param task_type: The type of the inference task that the model will perform. :param mistral_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `mistral`. :param service_settings: Settings used to install the inference model. These settings are specific to the `mistral` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if mistral_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'mistral_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "mistral_inference_id": _quote(mistral_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_mistral", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_openai( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "text_embedding"] ], openai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an OpenAI inference endpoint.

Create an inference endpoint to perform an inference task with the openai service or openai-compatible APIs.
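
A minimal sketch, assuming an AsyncElasticsearch instance named client; the service setting names and the model name are assumptions, and the API key is a placeholder:

    resp = await client.inference.put_openai(
        task_type="text_embedding",
        openai_inference_id="my-openai-embeddings",
        service="openai",
        service_settings={
            "api_key": "<openai api key>",  # placeholder
            "model_id": "text-embedding-3-small",  # assumed model name
        },
    )
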

``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. :param openai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `openai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `openai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if openai_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'openai_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "openai_inference_id": _quote(openai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_openai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) async def put_voyageai( self, *, task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], voyageai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a VoyageAI inference endpoint.

Create an inference endpoint to perform an inference task with the voyageai service.

Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.

``_ :param task_type: The type of the inference task that the model will perform. :param voyageai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `voyageai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `voyageai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if voyageai_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'voyageai_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "voyageai_inference_id": _quote(voyageai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_voyageai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings"), ) async def put_watsonx( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "text_embedding"] ], watsonx_inference_id: str, service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Watsonx inference endpoint.

Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.

``_ :param task_type: The type of the inference task that the model will perform. :param watsonx_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `watsonxai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `watsonxai` service. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if watsonx_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watsonx_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "watsonx_inference_id": _quote(watsonx_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_watsonx", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) async def rerank( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, query: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform reranking inference on the service.
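
A minimal sketch, assuming an AsyncElasticsearch instance named client and a placeholder endpoint id:

    resp = await client.inference.rerank(
        inference_id="my-rerank-endpoint",
        query="What is Elasticsearch?",
        input=[
            "Elasticsearch is a distributed search and analytics engine.",
            "The weather is sunny today.",
        ],
    )
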

``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. :param query: Query input. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. :param timeout: The amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/rerank/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if query is not None: __body["query"] = query if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.rerank", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "task_settings"), ) async def sparse_embedding( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform sparse embedding inference on the service.
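
A minimal sketch, assuming an AsyncElasticsearch instance named client and a placeholder endpoint id; input may be a single string or a list of strings:

    resp = await client.inference.sparse_embedding(
        inference_id="my-elser-endpoint",
        input="The quick brown fox jumps over the lazy dog.",
    )
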

``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. :param task_settings: Optional task settings :param timeout: Specifies the amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.sparse_embedding", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "task_settings"), ) async def text_embedding( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform text embedding inference on the service.
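
A minimal sketch, assuming an AsyncElasticsearch instance named client and a placeholder endpoint id:

    resp = await client.inference.text_embedding(
        inference_id="my-embedding-endpoint",
        input=["first passage to embed", "second passage to embed"],
    )
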

``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. :param task_settings: Optional task settings :param timeout: Specifies the amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.text_embedding", path_parts=__path_parts, ) @_rewrite_parameters( body_name="inference_config", ) async def update( self, *, inference_id: str, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update an inference endpoint.

Modify task_settings, secrets (within service_settings), or num_allocations for an inference endpoint, depending on the specific endpoint service and task_type.

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
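
A minimal sketch, assuming an AsyncElasticsearch instance named client; whether num_allocations sits under service_settings depends on the endpoint's service, so treat the body below as illustrative:

    resp = await client.inference.update(
        inference_id="my-elser-endpoint",
        inference_config={"service_settings": {"num_allocations": 2}},
    )
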

``_ :param inference_id: The unique identifier of the inference endpoint. :param inference_config: :param task_type: The type of inference task that the model performs. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if inference_config is None and body is None: raise ValueError( "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." ) elif inference_config is not None and body is not None: raise ValueError("Cannot set both 'inference_config' and 'body'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}/_update' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}/_update' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.update", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/ingest.py000066400000000000000000000763711506101734100250760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class IngestClient(NamespacedClient): @_rewrite_parameters() async def delete_geoip_database( self, *, id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete GeoIP database configurations.

Delete one or more IP geolocation database configurations.

``_ :param id: A comma-separated list of geoip database configurations to delete :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/geoip/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ingest.delete_geoip_database", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_ip_location_database( self, *, id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete IP geolocation database configurations.

``_ :param id: A comma-separated list of IP location database configurations. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ingest.delete_ip_location_database", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_pipeline( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete pipelines. Delete one or more ingest pipelines.

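A minimal usage sketch (the endpoint URL and the ``my-app-*`` naming pattern below are illustrative assumptions, not part of this module):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def remove_app_pipelines() -> None:
        # Wildcard expressions are accepted; `*` alone would delete every
        # ingest pipeline in the cluster.
        await client.ingest.delete_pipeline(id="my-app-*", timeout="30s")
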
``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ingest.delete_pipeline", path_parts=__path_parts, ) @_rewrite_parameters() async def geo_ip_stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.geo_ip_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_geoip_database( self, *, id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get GeoIP database configurations.

Get information about one or more IP geolocation database configurations.

``_ :param id: A comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/geoip/database/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ingest/geoip/database" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.get_geoip_database", path_parts=__path_parts, ) @_rewrite_parameters() async def get_ip_location_database( self, *, id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get IP geolocation database configurations.

``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ingest/ip_location/database" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.get_ip_location_database", path_parts=__path_parts, ) @_rewrite_parameters() async def get_pipeline( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get pipelines.

Get information about one or more ingest pipelines. This API returns a local reference of the pipeline.

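For example, a caller might list pipeline names without their definitions (the client setup is an assumption):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def list_pipeline_names() -> list:
        # summary=True returns pipelines without their processor definitions.
        resp = await client.ingest.get_pipeline(id="*", summary=True)
        return list(resp.body)  # the response body is keyed by pipeline name
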
``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param summary: Return pipelines without their definitions (default: false) """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ingest/pipeline" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if summary is not None: __query["summary"] = summary __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.get_pipeline", path_parts=__path_parts, ) @_rewrite_parameters() async def processor_grok( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.

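A small sketch that fetches the built-in patterns and prints one (the local endpoint is assumed; ``COMBINEDAPACHELOG`` is one of the standard built-in pattern names):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def show_apache_pattern() -> None:
        resp = await client.ingest.processor_grok()
        # The response maps built-in pattern names to their definitions.
        print(resp["patterns"]["COMBINEDAPACHELOG"])
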
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.processor_grok", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("maxmind", "name"), ) async def put_geoip_database( self, *, id: str, maxmind: t.Optional[t.Mapping[str, t.Any]] = None, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a GeoIP database configuration.

Refer to the create or update IP geolocation database configuration API.

``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. :param name: The provider-assigned name of the IP geolocation database to download. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if maxmind is None and body is None: raise ValueError("Empty value passed for parameter 'maxmind'") if name is None and body is None: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/geoip/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if maxmind is not None: __body["maxmind"] = maxmind if name is not None: __body["name"] = name __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.put_geoip_database", path_parts=__path_parts, ) @_rewrite_parameters( body_name="configuration", ) async def put_ip_location_database( self, *, id: str, configuration: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an IP geolocation database configuration.

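A hedged sketch of how the ``configuration`` body parameter might be used (the database id, MaxMind database name, and account id are placeholders):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def configure_ip_location_db() -> None:
        # The whole request body is forwarded through `configuration`.
        await client.ingest.put_ip_location_database(
            id="my-geoip-db",
            configuration={
                "name": "GeoIP2-City",
                "maxmind": {"account_id": "1234567"},  # placeholder account id
            },
        )
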
``_ :param id: The database configuration identifier. :param configuration: :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if configuration is None and body is None: raise ValueError( "Empty value passed for parameters 'configuration' and 'body', one of them should be set." ) elif configuration is not None and body is not None: raise ValueError("Cannot set both 'configuration' and 'body'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = configuration if configuration is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.put_ip_location_database", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "deprecated", "description", "meta", "on_failure", "processors", "version", ), parameter_aliases={"_meta": "meta"}, ) async def put_pipeline( self, *, id: str, deprecated: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_version: t.Optional[int] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, on_failure: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, pretty: t.Optional[bool] = None, processors: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a pipeline. Changes made using this API take effect immediately.

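For illustration, a minimal pipeline with a single ``rename`` processor (the pipeline id and field names are assumptions):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def create_pipeline() -> None:
        await client.ingest.put_pipeline(
            id="my-pipeline",
            description="Rename a field before indexing",
            processors=[
                {"rename": {"field": "hostname", "target_field": "host.name"}}
            ],
        )
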
``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. :param description: Description of the ingest pipeline. :param if_version: Required version for optimistic concurrency control for pipeline updates :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. :param on_failure: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. :param processors: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param version: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_version is not None: __query["if_version"] = if_version if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if deprecated is not None: __body["deprecated"] = deprecated if description is not None: __body["description"] = description if meta is not None: __body["_meta"] = meta if on_failure is not None: __body["on_failure"] = on_failure if processors is not None: __body["processors"] = processors if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.put_pipeline", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "pipeline"), ) async def simulate( self, *, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pipeline: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, verbose: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate a pipeline.

Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.

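A short sketch that simulates an inline pipeline definition against one sample document (the endpoint and field names are assumptions):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def test_inline_pipeline() -> None:
        resp = await client.ingest.simulate(
            pipeline={"processors": [{"lowercase": {"field": "name"}}]},
            docs=[{"_source": {"name": "ELASTIC"}}],
            verbose=True,  # include per-processor output
        )
        print(resp["docs"])
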
``_ :param docs: Sample documents to test in the pipeline. :param id: The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. :param pipeline: The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. :param verbose: If `true`, the response includes output data for each processor in the executed pipeline. """ if docs is None and body is None: raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}/_simulate' else: __path_parts = {} __path = "/_ingest/pipeline/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if verbose is not None: __query["verbose"] = verbose if not __body: if docs is not None: __body["docs"] = docs if pipeline is not None: __body["pipeline"] = pipeline __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.simulate", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/license.py000066400000000000000000000405161506101734100252170ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class LicenseClient(NamespacedClient): @_rewrite_parameters() async def delete( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete the license.

When the license expires, your subscription level reverts to Basic.

If the operator privileges feature is enabled, only operator users can use this API.

``_ :param master_timeout: The period to wait for a connection to the master node. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="license.delete", path_parts=__path_parts, ) @_rewrite_parameters() async def get( self, *, accept_enterprise: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get license information.

Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

NOTE: If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.

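Given the note above, a cautious caller might retry briefly on a 404 (a sketch; the retry policy is an assumption, not library behavior):

.. code-block:: python

    import asyncio

    from elasticsearch import AsyncElasticsearch, NotFoundError

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def get_license_with_retry() -> dict:
        # Shortly after startup the master may still be generating cluster
        # state, so an unexpected 404 is retried a few times.
        for _ in range(5):
            try:
                return (await client.license.get()).body
            except NotFoundError:
                await asyncio.sleep(1)
        raise RuntimeError("license API kept returning 404")
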
``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. :param local: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" __query: t.Dict[str, t.Any] = {} if accept_enterprise is not None: __query["accept_enterprise"] = accept_enterprise if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="license.get", path_parts=__path_parts, ) @_rewrite_parameters() async def get_basic_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the basic license status.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="license.get_basic_status", path_parts=__path_parts, ) @_rewrite_parameters() async def get_trial_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the trial status.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="license.get_trial_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("license", "licenses"), ) async def post( self, *, acknowledge: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, license: t.Optional[t.Mapping[str, t.Any]] = None, licenses: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the license.

You can update your license at runtime without shutting down your nodes. License updates take effect immediately. However, if the license you are installing does not support all of the features that were available with your previous license, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API.

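A minimal sketch of installing a license (``license_doc`` stands for the parsed JSON license file issued by Elastic; the client setup is assumed):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def install_license(license_doc: dict) -> None:
        # acknowledge=True accepts any feature changes described above.
        resp = await client.license.post(license=license_doc, acknowledge=True)
        print(resp["license_status"])
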
``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: :param licenses: A sequence of one or more JSON documents containing the license information. :param master_timeout: The period to wait for a connection to the master node. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if acknowledge is not None: __query["acknowledge"] = acknowledge if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if license is not None: __body["license"] = license if licenses is not None: __body["licenses"] = licenses if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="license.post", path_parts=__path_parts, ) @_rewrite_parameters() async def post_start_basic( self, *, acknowledge: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a basic license.

Start an indefinite basic license, which gives access to all the basic features.

NOTE: In order to start a basic license, you must not currently have a basic license.

However, if the basic license does not support all of the features that are available with your current license, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

To check the status of your basic license, use the get basic license API.

``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) :param master_timeout: Period to wait for a connection to the master node. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_basic" __query: t.Dict[str, t.Any] = {} if acknowledge is not None: __query["acknowledge"] = acknowledge if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="license.post_start_basic", path_parts=__path_parts, ) @_rewrite_parameters() async def post_start_trial( self, *, acknowledge: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, type: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a trial. Start a 30-day trial, which gives access to all subscription features.

NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.

To check the status of your trial, use the get trial status API.

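For example (a sketch; the local endpoint is an assumption):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def start_trial() -> None:
        resp = await client.license.post_start_trial(acknowledge=True)
        print(resp["trial_was_started"])
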
``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) :param master_timeout: Period to wait for a connection to the master node. :param type: The type of trial license to generate (default: "trial") """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_trial" __query: t.Dict[str, t.Any] = {} if acknowledge is not None: __query["acknowledge"] = acknowledge if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if type is not None: __query["type"] = type __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="license.post_start_trial", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/logstash.py000066400000000000000000000146061506101734100254220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class LogstashClient(NamespacedClient): @_rewrite_parameters() async def delete_pipeline( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code.

``_ :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_logstash/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="logstash.delete_pipeline", path_parts=__path_parts, ) @_rewrite_parameters() async def get_pipeline( self, *, id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get Logstash pipelines. Get pipelines that are used for Logstash Central Management.

``_ :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_logstash/pipeline/{__path_parts["id"]}' else: __path_parts = {} __path = "/_logstash/pipeline" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="logstash.get_pipeline", path_parts=__path_parts, ) @_rewrite_parameters( body_name="pipeline", ) async def put_pipeline( self, *, id: str, pipeline: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a Logstash pipeline.

Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced.

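A hedged sketch of the ``pipeline`` body this method forwards verbatim (the field values below, including the username and timestamp, are placeholders):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def save_logstash_pipeline() -> None:
        await client.logstash.put_pipeline(
            id="my-pipeline",
            pipeline={
                "description": "Sample pipeline",
                "last_modified": "2025-01-01T00:00:00.000Z",
                "pipeline": "input {} filter {} output {}",
                "pipeline_metadata": {"type": "logstash_pipeline", "version": "1"},
                "pipeline_settings": {"pipeline.workers": 1},
                "username": "elastic",  # placeholder user
            },
        )
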
``_ :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if pipeline is None and body is None: raise ValueError( "Empty value passed for parameters 'pipeline' and 'body', one of them should be set." ) elif pipeline is not None and body is not None: raise ValueError("Cannot set both 'pipeline' and 'body'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_logstash/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = pipeline if pipeline is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="logstash.put_pipeline", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/migration.py000066400000000000000000000144121506101734100255620ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class MigrationClient(NamespacedClient): @_rewrite_parameters() async def deprecations( self, *, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

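A small sketch that prints index-level deprecation warnings (the ``logs-*`` pattern is an assumption; the iteration follows the documented response shape):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def print_index_deprecations() -> None:
        resp = await client.migration.deprecations(index="logs-*")
        # index_settings maps each concrete index name to its issues.
        for index_name, issues in resp["index_settings"].items():
            for issue in issues:
                print(index_name, issue["level"], issue["message"])
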
``_ :param index: Comma-separate list of data streams or indices to check. Wildcard (*) expressions are supported. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_migration/deprecations' else: __path_parts = {} __path = "/_migration/deprecations" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="migration.deprecations", path_parts=__path_parts, ) @_rewrite_parameters() async def get_feature_upgrade_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress.

TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="migration.get_feature_upgrade_status", path_parts=__path_parts, ) @_rewrite_parameters() async def post_feature_upgrade( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process.

Some functionality might be temporarily unavailable during the migration process.

TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="migration.post_feature_upgrade", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/ml.py000066400000000000000000010117521506101734100242060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class MlClient(NamespacedClient): @_rewrite_parameters() async def clear_trained_model_deployment_cache( self, *, model_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear trained model deployment cache.

A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment; the cache is cleared on all nodes where the trained model is assigned.

``_ :param model_id: The unique identifier of the trained model. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = ( f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/cache/_clear' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.clear_trained_model_deployment_cache", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("allow_no_match", "force", "timeout"), ) async def close_job( self, *, job_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Close anomaly detection jobs.

A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.

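For example, closing every job with a generous timeout (a sketch; the values are assumptions):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def close_all_jobs() -> None:
        # force=True would skip the graceful housekeeping described above.
        await client.ml.close_job(job_id="_all", timeout="10m")
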
``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. :param allow_no_match: Refer to the description for the `allow_no_match` query parameter. :param force: Refer to the description for the `force` query parameter. :param timeout: Refer to the description for the `timeout` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_close' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_no_match is not None: __body["allow_no_match"] = allow_no_match if force is not None: __body["force"] = force if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.close_job", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_calendar( self, *, calendar_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a calendar.

Remove all scheduled events from a calendar, then delete it.

``_ :param calendar_id: A string that uniquely identifies a calendar. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_calendar", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_calendar_event( self, *, calendar_id: str, event_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete events from a calendar.

``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if event_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'event_id'") __path_parts: t.Dict[str, str] = { "calendar_id": _quote(calendar_id), "event_id": _quote(event_id), } __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/events/{__path_parts["event_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_calendar_event", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_calendar_job( self, *, calendar_id: str, job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete anomaly detection jobs from a calendar.

``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = { "calendar_id": _quote(calendar_id), "job_id": _quote(job_id), } __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/jobs/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_calendar_job", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_data_frame_analytics( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a data frame analytics job.

``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. :param timeout: The time to wait for the job to be deleted. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_datafeed( self, *, datafeed_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a datafeed.

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param force: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("requests_per_second", "timeout"), ) async def delete_expired_data( self, *, job_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete expired ML data.

Delete all job results, model snapshots and forecast data that have exceeded their retention days period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single or set of anomaly detection jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.

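A throttled clean-up sketch (the rate and timeout values are arbitrary examples):

.. code-block:: python

    from elasticsearch import AsyncElasticsearch

    client = AsyncElasticsearch("http://localhost:9200")  # assumed local cluster

    async def prune_ml_data() -> None:
        # Omitting job_id targets all anomaly detection jobs; the deletes
        # are throttled to roughly 100 requests per second.
        await client.ml.delete_expired_data(
            requests_per_second=100.0, timeout="1h"
        )
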
``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. :param requests_per_second: The desired requests per second for the deletion processes. The default behavior is no throttling. :param timeout: How long can the underlying delete processes run until they are canceled. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/_delete_expired_data/{__path_parts["job_id"]}' else: __path_parts = {} __path = "/_ml/_delete_expired_data" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if requests_per_second is not None: __body["requests_per_second"] = requests_per_second if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.delete_expired_data", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_filter( self, *, filter_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a filter.

If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.

``_ :param filter_id: A string that uniquely identifies a filter. """ if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'filter_id'") __path_parts: t.Dict[str, str] = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_filter", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_forecast( self, *, job_id: str, forecast_id: t.Optional[str] = None, allow_no_forecasts: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete forecasts from a job.

By default, forecasts are retained for 14 days. You can specify a different retention period with the expires_in parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.

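A minimal sketch, reusing the ``client`` from the earlier example with hypothetical identifiers::

    # Delete every forecast attached to the job, tolerating jobs with none.
    resp = await client.ml.delete_forecast(
        job_id="sensor-metrics",  # hypothetical job id
        forecast_id="_all",
        allow_no_forecasts=True,
    )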
``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. :param allow_no_forecasts: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. :param timeout: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and forecast_id not in SKIP_IN_PATH: __path_parts = { "job_id": _quote(job_id), "forecast_id": _quote(forecast_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_forecast/{__path_parts["forecast_id"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_forecast' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if allow_no_forecasts is not None: __query["allow_no_forecasts"] = allow_no_forecasts if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_forecast", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_job( self, *, job_id: str, delete_user_annotations: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an anomaly detection job.

All job configuration, model state, and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma-separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.

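A minimal sketch (hypothetical job id; reuses the ``client`` defined earlier)::

    # Force-delete an opened job and return immediately instead of waiting.
    resp = await client.ml.delete_job(
        job_id="sensor-metrics",
        force=True,
        wait_for_completion=False,
    )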
``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. :param force: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. :param wait_for_completion: Specifies whether the request should return immediately or wait until the job deletion completes. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} if delete_user_annotations is not None: __query["delete_user_annotations"] = delete_user_annotations if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_job", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_model_snapshot( self, *, job_id: str, snapshot_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a model snapshot.

You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the model_snapshot_id in the results from the get jobs API.

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_model_snapshot", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_trained_model( self, *, model_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an unreferenced trained model.

The request deletes a trained inference model that is not referenced by an ingest pipeline.

``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_trained_model", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_trained_model_alias( self, *, model_id: str, model_alias: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a trained model alias.

This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the model_id, this API returns an error.

``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if model_alias in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_alias'") __path_parts: t.Dict[str, str] = { "model_id": _quote(model_id), "model_alias": _quote(model_alias), } __path = f'/_ml/trained_models/{__path_parts["model_id"]}/model_aliases/{__path_parts["model_alias"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_trained_model_alias", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis_config", "max_bucket_cardinality", "overall_cardinality", ), ) async def estimate_model_memory( self, *, analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_bucket_cardinality: t.Optional[t.Mapping[str, int]] = None, overall_cardinality: t.Optional[t.Mapping[str, int]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Estimate job model memory usage.

Estimate the memory usage of an anomaly detection job model. The estimate is based on the job's analysis configuration details and cardinality estimates for the fields it references.

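A minimal sketch with a hypothetical single-detector configuration (reusing the earlier ``client``; the field names are illustrative only)::

    resp = await client.ml.estimate_model_memory(
        analysis_config={
            "bucket_span": "15m",
            "detectors": [
                {"function": "mean", "field_name": "responsetime", "by_field_name": "host"}
            ],
            "influencers": ["host"],
        },
        overall_cardinality={"host": 500},  # estimated distinct hosts over the whole period
    )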
``_ :param analysis_config: The analysis configuration details for the job, which specify the properties that you can provide in the `analysis_config` component of the body of this API. :param max_bucket_cardinality: Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation. :param overall_cardinality: Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/anomaly_detectors/_estimate_model_memory" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analysis_config is not None: __body["analysis_config"] = analysis_config if max_bucket_cardinality is not None: __body["max_bucket_cardinality"] = max_bucket_cardinality if overall_cardinality is not None: __body["overall_cardinality"] = overall_cardinality __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.estimate_model_memory", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("evaluation", "index", "query"), ) async def evaluate_data_frame( self, *, evaluation: t.Optional[t.Mapping[str, t.Any]] = None, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Evaluate data frame analytics.

The API packages together commonly used evaluation metrics for various types of machine learning features. It is designed for use on indexes created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.

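A minimal sketch of an outlier-detection evaluation over a hypothetical destination index (same ``client`` as above; field names are assumptions)::

    resp = await client.ml.evaluate_data_frame(
        index="my-analytics-dest",
        evaluation={
            "outlier_detection": {
                "actual_field": "is_outlier",  # ground truth field
                "predicted_probability_field": "ml.outlier_score",  # analytics result field
            }
        },
        query={"term": {"dataset": {"value": "validation"}}},  # evaluate a subset only
    )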
``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. :param query: A query clause that retrieves a subset of data from the source index. """ if evaluation is None and body is None: raise ValueError("Empty value passed for parameter 'evaluation'") if index is None and body is None: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {} __path = "/_ml/data_frame/_evaluate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if evaluation is not None: __body["evaluation"] = evaluation if index is not None: __body["index"] = index if query is not None: __body["query"] = query __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.evaluate_data_frame", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_lazy_start", "analysis", "analyzed_fields", "description", "dest", "max_num_threads", "model_memory_limit", "source", ), ) async def explain_data_frame_analytics( self, *, id: t.Optional[str] = None, allow_lazy_start: t.Optional[bool] = None, analysis: t.Optional[t.Mapping[str, t.Any]] = None, analyzed_fields: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain data frame analytics config.

This API provides explanations for a data frame analytics config that either already exists or has not been created yet. The following explanations are provided (a minimal request sketch appears after the list):

  • which fields are included in or excluded from the analysis, and why,
  • how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for the model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
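A minimal request sketch for a config that has not been created yet (hypothetical index and analysis; same ``client`` as above)::

    resp = await client.ml.explain_data_frame_analytics(
        source={"index": "houses"},  # hypothetical source index
        analysis={"regression": {"dependent_variable": "price"}},
    )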
``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param allow_lazy_start: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. :param analysis: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. :param analyzed_fields: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. :param description: A description of the job. :param dest: The destination configuration, consisting of index and optionally results_field (ml by default). :param max_num_threads: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. :param source: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. 
""" __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_explain' else: __path_parts = {} __path = "/_ml/data_frame/analytics/_explain" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_lazy_start is not None: __body["allow_lazy_start"] = allow_lazy_start if analysis is not None: __body["analysis"] = analysis if analyzed_fields is not None: __body["analyzed_fields"] = analyzed_fields if description is not None: __body["description"] = description if dest is not None: __body["dest"] = dest if max_num_threads is not None: __body["max_num_threads"] = max_num_threads if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if source is not None: __body["source"] = source if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.explain_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("advance_time", "calc_interim", "end", "skip_time", "start"), ) async def flush_job( self, *, job_id: str, advance_time: t.Optional[t.Union[str, t.Any]] = None, calc_interim: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, skip_time: t.Optional[t.Union[str, t.Any]] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. The flush and close operations are similar; however, flushing is more efficient if you expect to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk, and the job must be opened again before analyzing further data.

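A minimal sketch (hypothetical job id, same ``client``)::

    # Calculate interim results for the most recent buckets before querying them.
    resp = await client.ml.flush_job(
        job_id="sensor-metrics",
        calc_interim=True,
    )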
``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. :param calc_interim: Refer to the description for the `calc_interim` query parameter. :param end: Refer to the description for the `end` query parameter. :param skip_time: Refer to the description for the `skip_time` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_flush' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if advance_time is not None: __body["advance_time"] = advance_time if calc_interim is not None: __body["calc_interim"] = calc_interim if end is not None: __body["end"] = end if skip_time is not None: __body["skip_time"] = skip_time if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.flush_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("duration", "expires_in", "max_model_memory"), ) async def forecast( self, *, job_id: str, duration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, error_trace: t.Optional[bool] = None, expires_in: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_model_memory: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Predict future behavior of a time series.

Forecasts predict future behavior based on historical data. Forecasts are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an over_field_name in its configuration.

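A minimal sketch (hypothetical job id, same ``client``; the job must be open)::

    # Forecast three days ahead; let the forecast expire after one week.
    resp = await client.ml.forecast(
        job_id="sensor-metrics",
        duration="3d",
        expires_in="7d",
    )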
``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. :param duration: Refer to the description for the `duration` query parameter. :param expires_in: Refer to the description for the `expires_in` query parameter. :param max_model_memory: Refer to the description for the `max_model_memory` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_forecast' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if duration is not None: __body["duration"] = duration if expires_in is not None: __body["expires_in"] = expires_in if max_model_memory is not None: __body["max_model_memory"] = max_model_memory if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.forecast", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "anomaly_score", "desc", "end", "exclude_interim", "expand", "page", "sort", "start", ), parameter_aliases={"from": "from_"}, ) async def get_buckets( self, *, job_id: str, timestamp: t.Optional[t.Union[str, t.Any]] = None, anomaly_score: t.Optional[float] = None, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, expand: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.

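A minimal sketch (hypothetical job id, same ``client``; the response keys follow the get buckets API)::

    # Fetch the 50 most anomalous finalized buckets, highest scores first.
    resp = await client.ml.get_buckets(
        job_id="sensor-metrics",
        anomaly_score=75.0,
        exclude_interim=True,
        sort="anomaly_score",
        desc=True,
        size=50,
    )
    for bucket in resp["buckets"]:
        print(bucket["timestamp"], bucket["anomaly_score"])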
``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. :param anomaly_score: Refer to the description for the `anomaly_score` query parameter. :param desc: Refer to the description for the `desc` query parameter. :param end: Refer to the description for the `end` query parameter. :param exclude_interim: Refer to the description for the `exclude_interim` query parameter. :param expand: Refer to the description for the `expand` query parameter. :param from_: Skips the specified number of buckets. :param page: :param size: Specifies the maximum number of buckets to obtain. :param sort: Refer to the description for the `sort` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and timestamp not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id), "timestamp": _quote(timestamp)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/buckets/{__path_parts["timestamp"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/buckets' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if anomaly_score is not None: __body["anomaly_score"] = anomaly_score if desc is not None: __body["desc"] = desc if end is not None: __body["end"] = end if exclude_interim is not None: __body["exclude_interim"] = exclude_interim if expand is not None: __body["expand"] = expand if page is not None: __body["page"] = page if sort is not None: __body["sort"] = sort if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_buckets", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_calendar_events( self, *, calendar_id: str, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, job_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get info about events in calendars.

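A minimal sketch (same ``client``; the time range is illustrative)::

    # List up to 100 events from all calendars that start during 2025.
    resp = await client.ml.get_calendar_events(
        calendar_id="_all",
        start="2025-01-01T00:00:00Z",
        end="2026-01-01T00:00:00Z",
        size=100,
    )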
``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. :param end: Specifies to get events with timestamps earlier than this time. :param from_: Skips the specified number of events. :param job_id: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. :param size: Specifies the maximum number of events to obtain. :param start: Specifies to get events with timestamps after this time. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/events' __query: t.Dict[str, t.Any] = {} if end is not None: __query["end"] = end if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if job_id is not None: __query["job_id"] = job_id if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if start is not None: __query["start"] = start __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_calendar_events", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("page",), parameter_aliases={"from": "from_"}, ) async def get_calendars( self, *, calendar_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get calendar configuration info.

``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. :param from_: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. :param page: This object is supported only when you omit the calendar identifier. :param size: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. """ __path_parts: t.Dict[str, str] if calendar_id not in SKIP_IN_PATH: __path_parts = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}' else: __path_parts = {} __path = "/_ml/calendars" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if page is not None: __body["page"] = page if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_calendars", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("page",), parameter_aliases={"from": "from_"}, ) async def get_categories( self, *, job_id: str, category_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, partition_field_value: t.Optional[str] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job results for categories.

``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. :param from_: Skips the specified number of categories. :param page: Configures pagination. This parameter has the `from` and `size` properties. :param partition_field_value: Only return categories for the specified partition. :param size: Specifies the maximum number of categories to obtain. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and category_id not in SKIP_IN_PATH: __path_parts = { "job_id": _quote(job_id), "category_id": _quote(category_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/categories/{__path_parts["category_id"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = ( f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/categories' ) else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if partition_field_value is not None: __query["partition_field_value"] = partition_field_value if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if page is not None: __body["page"] = page if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_categories", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_data_frame_analytics( self, *, id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.

``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. :param from_: Skips the specified number of data frame analytics jobs. :param size: Specifies the maximum number of data frame analytics jobs to obtain. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ml/data_frame/analytics" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_data_frame_analytics_stats( self, *, id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data frame analytics job stats.

``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. :param from_: Skips the specified number of data frame analytics jobs. :param size: Specifies the maximum number of data frame analytics jobs to obtain. :param verbose: Defines whether the stats response should be verbose. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_stats' else: __path_parts = {} __path = "/_ml/data_frame/analytics/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_data_frame_analytics_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_datafeed_stats( self, *, datafeed_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using _all, by specifying * as the <feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the only information you receive is the datafeed_id and the state. This API returns a maximum of 10,000 datafeeds.

``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_stats' else: __path_parts = {} __path = "/_ml/datafeeds/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_datafeed_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_datafeeds( self, *, datafeed_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using _all, by specifying * as the <feed_id>, or by omitting the <feed_id>. This API returns a maximum of 10,000 datafeeds.

``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}' else: __path_parts = {} __path = "/_ml/datafeeds" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_datafeeds", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_filters( self, *, filter_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get filters. You can get a single filter or all filters.

``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. :param size: Specifies the maximum number of filters to obtain. """ __path_parts: t.Dict[str, str] if filter_id not in SKIP_IN_PATH: __path_parts = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}' else: __path_parts = {} __path = "/_ml/filters" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_filters", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("page",), parameter_aliases={"from": "from_"}, ) async def get_influencers( self, *, job_id: str, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, influencer_score: t.Optional[float] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an influencer_field_name is specified in the job configuration.

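A minimal sketch (hypothetical job id, same ``client``)::

    # Return the ten highest-scoring influencers, ignoring interim results.
    resp = await client.ml.get_influencers(
        job_id="sensor-metrics",
        influencer_score=50.0,
        exclude_interim=True,
        sort="influencer_score",
        desc=True,
        size=10,
    )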
``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. :param end: Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps. :param exclude_interim: If true, the output excludes interim results. By default, interim results are included. :param from_: Skips the specified number of influencers. :param influencer_score: Returns influencers with anomaly scores greater than or equal to this value. :param page: Configures pagination. This parameter has the `from` and `size` properties. :param size: Specifies the maximum number of influencers to obtain. :param sort: Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. :param start: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/influencers' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if desc is not None: __query["desc"] = desc if end is not None: __query["end"] = end if error_trace is not None: __query["error_trace"] = error_trace if exclude_interim is not None: __query["exclude_interim"] = exclude_interim if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if influencer_score is not None: __query["influencer_score"] = influencer_score if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if sort is not None: __query["sort"] = sort if start is not None: __query["start"] = start if not __body: if page is not None: __body["page"] = page if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_influencers", path_parts=__path_parts, ) @_rewrite_parameters() async def get_job_stats( self, *, job_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job stats.

``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_stats' else: __path_parts = {} __path = "/_ml/anomaly_detectors/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_job_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_jobs( self, *, job_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.

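A minimal sketch (same ``client``; the wildcard group name is hypothetical)::

    # Fetch the configuration of every job matching "sensor*", stripped of
    # generated fields so it can be replayed against another cluster.
    resp = await client.ml.get_jobs(
        job_id="sensor*",
        allow_no_match=True,
        exclude_generated=True,
    )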
``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' else: __path_parts = {} __path = "/_ml/anomaly_detectors" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_jobs", path_parts=__path_parts, ) @_rewrite_parameters() async def get_memory_stats( self, *, node_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory on each node, both within the JVM heap and natively, outside of the JVM.

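A minimal sketch (same ``client``; the ``ml:true`` node filter is taken from the parameter description below)::

    # Report memory usage for ML nodes only, waiting at most 30 seconds.
    resp = await client.ml.get_memory_stats(
        node_id="ml:true",
        timeout="30s",
    )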
``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_ml/memory/{__path_parts["node_id"]}/_stats' else: __path_parts = {} __path = "/_ml/memory/_stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_memory_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_model_snapshot_upgrade_stats( self, *, job_id: str, snapshot_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job model snapshot upgrade usage info.
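
For example (a sketch; it assumes a connected `AsyncElasticsearch` client named `client`, as in the earlier sketch, and the job ID is made up)::

    resp = await client.ml.get_model_snapshot_upgrade_stats(
        job_id="my-job",
        snapshot_id="_all",      # all snapshots for the job
        allow_no_match=True,     # empty result instead of a 404 when nothing matches
    )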

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no jobs that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_upgrade/_stats' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_model_snapshot_upgrade_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("desc", "end", "page", "sort", "start"), parameter_aliases={"from": "from_"}, ) async def get_model_snapshots( self, *, job_id: str, snapshot_id: t.Optional[str] = None, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get model snapshots info.
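
For example, fetching the newest snapshots first (a sketch; `client` and the job ID are assumptions)::

    resp = await client.ml.get_model_snapshots(
        job_id="my-job",
        sort="timestamp",
        desc=True,   # newest first
        size=5,
    )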

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. :param desc: Refer to the description for the `desc` query parameter. :param end: Refer to the description for the `end` query parameter. :param from_: Skips the specified number of snapshots. :param page: :param size: Specifies the maximum number of snapshots to obtain. :param sort: Refer to the description for the `sort` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and snapshot_id not in SKIP_IN_PATH: __path_parts = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if desc is not None: __body["desc"] = desc if end is not None: __body["end"] = end if page is not None: __body["page"] = page if sort is not None: __body["sort"] = sort if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_model_snapshots", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_no_match", "bucket_span", "end", "exclude_interim", "overall_score", "start", "top_n", ), ) async def get_overall_buckets( self, *, job_id: str, allow_no_match: t.Optional[bool] = None, bucket_span: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, overall_score: t.Optional[t.Union[float, str]] = None, pretty: t.Optional[bool] = None, start: t.Optional[t.Union[str, t.Any]] = None, top_n: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get overall bucket results.

Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.

The `overall_score` is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum `anomaly_score` per anomaly detection job in the overall bucket is calculated. Then the `top_n` of those scores are averaged to result in the `overall_score`. This means that you can fine-tune the `overall_score` so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set `top_n` to `1`, the `overall_score` is the maximum bucket score in the overall bucket. Alternatively, if you set `top_n` to the number of jobs, the `overall_score` is high only when all jobs detect anomalies in that overall bucket. If you set the `bucket_span` parameter (to a value greater than its default), the `overall_score` is the maximum `overall_score` of the overall buckets that have a span equal to the jobs' largest bucket span.
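
For example, to surface only the periods where at least two jobs scored high at the same time (a sketch; the job group and thresholds are invented, `client` is an assumed connected client, and the `overall_buckets` response field follows the overall buckets API)::

    resp = await client.ml.get_overall_buckets(
        job_id="web-*",        # group or wildcard covering several jobs
        top_n=2,               # average the two highest per-job scores
        overall_score=75.0,    # return only buckets at or above this score
    )
    for bucket in resp["overall_buckets"]:
        print(bucket["timestamp"], bucket["overall_score"])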

``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the ``. :param allow_no_match: Refer to the description for the `allow_no_match` query parameter. :param bucket_span: Refer to the description for the `bucket_span` query parameter. :param end: Refer to the description for the `end` query parameter. :param exclude_interim: Refer to the description for the `exclude_interim` query parameter. :param overall_score: Refer to the description for the `overall_score` query parameter. :param start: Refer to the description for the `start` query parameter. :param top_n: Refer to the description for the `top_n` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = ( f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/overall_buckets' ) __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_no_match is not None: __body["allow_no_match"] = allow_no_match if bucket_span is not None: __body["bucket_span"] = bucket_span if end is not None: __body["end"] = end if exclude_interim is not None: __body["exclude_interim"] = exclude_interim if overall_score is not None: __body["overall_score"] = overall_score if start is not None: __body["start"] = start if top_n is not None: __body["top_n"] = top_n if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_overall_buckets", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "desc", "end", "exclude_interim", "page", "record_score", "sort", "start", ), parameter_aliases={"from": "from_"}, ) async def get_records( self, *, job_id: str, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, record_score: t.Optional[float] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
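
A common pattern is to fetch only the strongest records rather than paging through everything (a sketch; the job ID and score threshold are illustrative)::

    resp = await client.ml.get_records(
        job_id="my-job",
        record_score=80.0,       # skip low-scoring records
        exclude_interim=True,    # final results only
        sort="record_score",
        desc=True,
        size=100,
    )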

``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. :param end: Refer to the description for the `end` query parameter. :param exclude_interim: Refer to the description for the `exclude_interim` query parameter. :param from_: Skips the specified number of records. :param page: :param record_score: Refer to the description for the `record_score` query parameter. :param size: Specifies the maximum number of records to obtain. :param sort: Refer to the description for the `sort` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/records' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if desc is not None: __body["desc"] = desc if end is not None: __body["end"] = end if exclude_interim is not None: __body["exclude_interim"] = exclude_interim if page is not None: __body["page"] = page if record_score is not None: __body["record_score"] = record_score if sort is not None: __body["sort"] = sort if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_records", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_trained_models( self, *, model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, decompress_definition: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, include: t.Optional[ t.Union[ str, t.Literal[ "definition", "definition_status", "feature_importance_baseline", "hyperparameters", "total_feature_importance", ], ] ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get trained model configuration info.
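
For example (a sketch; the model ID pattern is hypothetical)::

    resp = await client.ml.get_trained_models(
        model_id="my-model*",
        include="definition_status",
        exclude_generated=True,   # strip cluster-generated fields for portability
    )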

``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. :param decompress_definition: Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. """ __path_parts: t.Dict[str, str] if model_id not in SKIP_IN_PATH: __path_parts = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}' else: __path_parts = {} __path = "/_ml/trained_models" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if decompress_definition is not None: __query["decompress_definition"] = decompress_definition if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if include is not None: __query["include"] = include if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if tags is not None: __query["tags"] = tags __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_trained_models", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_trained_models_stats( self, *, model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
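
For example, paging through usage stats for every model (a sketch; `client` is an assumed connected client)::

    resp = await client.ml.get_trained_models_stats(
        model_id="*",
        from_=0,
        size=50,
        allow_no_match=True,
    )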

``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. :param from_: Skips the specified number of models. :param size: Specifies the maximum number of models to obtain. """ __path_parts: t.Dict[str, str] if model_id not in SKIP_IN_PATH: __path_parts = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/_stats' else: __path_parts = {} __path = "/_ml/trained_models/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_trained_models_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "inference_config"), ) async def infer_trained_model( self, *, model_id: str, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Evaluate a trained model.
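
For example, for an NLP model (a sketch; the model ID is made up, and `text_field` is the conventional input field named in the parameter docs below)::

    resp = await client.ml.infer_trained_model(
        model_id="my-nlp-model",
        docs=[{"text_field": "Elasticsearch is a distributed search engine."}],
        timeout="30s",
    )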

``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects should contain the fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. :param inference_config: The inference configuration updates to apply on the API call. :param timeout: Controls the amount of time to wait for inference results. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if docs is None and body is None: raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/_infer' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if docs is not None: __body["docs"] = docs if inference_config is not None: __body["inference_config"] = inference_config __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.infer_trained_model", path_parts=__path_parts, ) @_rewrite_parameters() async def info( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
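
For example (a sketch; `client` is an assumed connected `AsyncElasticsearch` instance)::

    resp = await client.ml.info()
    # Inspect the cluster's ML defaults and limits (response layout per the ML info API)
    print(resp)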

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("timeout",), ) async def open_job( self, *, job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Open anomaly detection jobs.

An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
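
For example (a sketch; the job ID is a placeholder)::

    await client.ml.open_job(job_id="my-job", timeout="30m")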

``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_open' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.open_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("events",), ) async def post_calendar_events( self, *, calendar_id: str, events: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Add scheduled events to the calendar.
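
For example, adding a maintenance window (a sketch; the calendar ID and times are invented; `start_time` and `end_time` accept epoch milliseconds or ISO 8601 strings, as the `events` parameter below notes)::

    await client.ml.post_calendar_events(
        calendar_id="maintenance",
        events=[
            {
                "description": "Planned outage",
                "start_time": "2024-01-06T00:00:00Z",
                "end_time": "2024-01-06T06:00:00Z",
            }
        ],
    )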

``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if events is None and body is None: raise ValueError("Empty value passed for parameter 'events'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/events' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if events is not None: __body["events"] = events __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.post_calendar_events", path_parts=__path_parts, ) @_rewrite_parameters( body_name="data", ) async def post_data( self, *, job_id: str, data: t.Optional[t.Sequence[t.Any]] = None, body: t.Optional[t.Sequence[t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, reset_end: t.Optional[t.Union[str, t.Any]] = None, reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Send data to an anomaly detection job for analysis.

IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
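
For example, sending two records as the request body (a sketch; the job is assumed to be open, and the field names must match its configured `data_description`)::

    await client.ml.post_data(
        job_id="my-job",
        data=[
            {"timestamp": 1704067200000, "responsetime": 32.1},
            {"timestamp": 1704067260000, "responsetime": 430.5},
        ],
    )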

``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. :param data: :param reset_end: Specifies the end of the bucket resetting range. :param reset_start: Specifies the start of the bucket resetting range. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if data is None and body is None: raise ValueError( "Empty value passed for parameters 'data' and 'body', one of them should be set." ) elif data is not None and body is not None: raise ValueError("Cannot set both 'data' and 'body'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_data' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if reset_end is not None: __query["reset_end"] = reset_end if reset_start is not None: __query["reset_start"] = reset_start __body = data if data is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.post_data", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("config",), ) async def preview_data_frame_analytics( self, *, id: t.Optional[str] = None, config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config.
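
For example, previewing an ad-hoc config without creating a job (a sketch; the index name and analysis choice are illustrative, and per the docs below `id` and `dest` can be omitted)::

    resp = await client.ml.preview_data_frame_analytics(
        config={
            "source": {"index": "my-data"},
            "analysis": {"outlier_detection": {}},
        }
    )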

``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_preview' else: __path_parts = {} __path = "/_ml/data_frame/analytics/_preview" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if config is not None: __body["config"] = config if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.preview_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("datafeed_config", "job_config"), ) async def preview_datafeed( self, *, datafeed_id: t.Optional[str] = None, datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_config: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
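
For example, previewing an existing datafeed over a bounded time window (a sketch; the datafeed ID is hypothetical)::

    resp = await client.ml.preview_datafeed(
        datafeed_id="datafeed-my-job",
        start="2024-01-01T00:00:00Z",
        end="2024-01-02T00:00:00Z",
    )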

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. :param datafeed_config: The datafeed definition to preview. :param end: The end time when the datafeed preview should stop :param job_config: The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. :param start: The start time from where the datafeed preview should begin """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_preview' else: __path_parts = {} __path = "/_ml/datafeeds/_preview" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if end is not None: __query["end"] = end if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if start is not None: __query["start"] = start if not __body: if datafeed_config is not None: __body["datafeed_config"] = datafeed_config if job_config is not None: __body["job_config"] = job_config if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.preview_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "job_ids"), ) async def put_calendar( self, *, calendar_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_ids: t.Optional[t.Sequence[str]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a calendar.
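
For example (a sketch; the identifiers are placeholders)::

    await client.ml.put_calendar(
        calendar_id="maintenance",
        description="Planned maintenance windows",
        job_ids=["my-job"],
    )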

``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. :param job_ids: An array of anomaly detection job identifiers. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if job_ids is not None: __body["job_ids"] = job_ids if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_calendar", path_parts=__path_parts, ) @_rewrite_parameters() async def put_calendar_job( self, *, calendar_id: str, job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Add anomaly detection job to calendar.
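
For example (a sketch; both identifiers are placeholders)::

    await client.ml.put_calendar_job(calendar_id="maintenance", job_id="my-job")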

``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = { "calendar_id": _quote(calendar_id), "job_id": _quote(job_id), } __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/jobs/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="ml.put_calendar_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis", "dest", "source", "allow_lazy_start", "analyzed_fields", "description", "headers", "max_num_threads", "meta", "model_memory_limit", "version", ), parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) async def put_data_frame_analytics( self, *, id: str, analysis: t.Optional[t.Mapping[str, t.Any]] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_start: t.Optional[bool] = None, analyzed_fields: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is `{"match_all": {}}`.

If the destination index does not exist, it is created automatically when you start the job.

If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.
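
A minimal creation sketch (the index names and memory limit are illustrative, not defaults)::

    await client.ml.put_data_frame_analytics(
        id="my-outlier-job",
        source={"index": "my-data"},
        dest={"index": "my-data-outliers"},   # created automatically when the job starts
        analysis={"outlier_detection": {}},
        model_memory_limit="128mb",
    )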

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param analysis: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. :param dest: The destination configuration. :param source: The configuration of how to source the analysis data. :param allow_lazy_start: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. :param analyzed_fields: Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. :param description: A description of the job. :param headers: :param max_num_threads: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. 
If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. :param version: """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if analysis is None and body is None: raise ValueError("Empty value passed for parameter 'analysis'") if dest is None and body is None: raise ValueError("Empty value passed for parameter 'dest'") if source is None and body is None: raise ValueError("Empty value passed for parameter 'source'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analysis is not None: __body["analysis"] = analysis if dest is not None: __body["dest"] = dest if source is not None: __body["source"] = source if allow_lazy_start is not None: __body["allow_lazy_start"] = allow_lazy_start if analyzed_fields is not None: __body["analyzed_fields"] = analyzed_fields if description is not None: __body["description"] = description if headers is not None: __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads if meta is not None: __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "chunking_config", "delayed_data_check_config", "frequency", "headers", "indexes", "indices", "indices_options", "job_id", "max_empty_searches", "query", "query_delay", "runtime_mappings", "script_fields", "scroll_size", ), ignore_deprecated_options={"headers"}, ) async def put_datafeed( self, *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indexes: t.Optional[t.Union[str, t.Sequence[str]]] = None, indices: t.Optional[t.Union[str, t.Sequence[str]]] = None, indices_options: t.Optional[t.Mapping[str, t.Any]] = None, job_id: t.Optional[str] = None, max_empty_searches: t.Optional[int] = None, pretty: t.Optional[bool] = None, 
query: t.Optional[t.Mapping[str, t.Any]] = None, query_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll_size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (`frequency`). If you are concerned about delayed data, you can add a delay (`query_delay`) at each interval. By default, the datafeed uses the following query: `{"match_all": {"boost": 1}}`.

When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the `.ml-config` index. Do not give users write privileges on the `.ml-config` index.
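
For example, a datafeed that feeds an existing job (a sketch; every name is hypothetical, and the query shown is simply the documented default made explicit)::

    await client.ml.put_datafeed(
        datafeed_id="datafeed-my-job",
        job_id="my-job",
        indices=["my-metrics-*"],
        query={"match_all": {"boost": 1}},
        query_delay="90s",
        scroll_size=1000,
    )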

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. :param aggs: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. :param chunking_config: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. :param delayed_data_check_config: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. :param headers: :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. :param ignore_unavailable: If true, unavailable indices (missing or closed) are ignored. :param indexes: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. :param indices: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. :param indices_options: Specifies index expansion options that are used during search :param job_id: Identifier for the anomaly detection job. :param max_empty_searches: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
:param query: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. :param query_delay: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. :param runtime_mappings: Specifies runtime fields for the datafeed search. :param script_fields: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. :param scroll_size: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: __body["delayed_data_check_config"] = delayed_data_check_config if frequency is not None: __body["frequency"] = frequency if headers is not None: __body["headers"] = headers if indexes is not None: __body["indexes"] = indexes if indices is not None: __body["indices"] = indices if indices_options is not None: __body["indices_options"] = indices_options if job_id is not None: __body["job_id"] = job_id if max_empty_searches is not None: __body["max_empty_searches"] = max_empty_searches if query is not None: __body["query"] = query if query_delay is not None: __body["query_delay"] = query_delay if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if scroll_size is not None: __body["scroll_size"] = scroll_size __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "items"), ) async def put_filter( self, *, filter_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: 
t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, items: t.Optional[t.Sequence[str]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the `custom_rules` property of detector configuration objects.
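
For example (a sketch; the filter contents are illustrative)::

    await client.ml.put_filter(
        filter_id="safe-domains",
        description="Domains excluded from anomaly rules",
        items=["*.example.com", "internal.test"],
    )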

``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. :param items: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. """ if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'filter_id'") __path_parts: t.Dict[str, str] = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if items is not None: __body["items"] = items __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_filter", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis_config", "data_description", "allow_lazy_open", "analysis_limits", "background_persist_interval", "custom_settings", "daily_model_snapshot_retention_after_days", "datafeed_config", "description", "groups", "model_plot_config", "model_snapshot_retention_days", "renormalization_window_days", "results_index_name", "results_retention_days", ), ) async def put_job( self, *, job_id: str, analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, custom_settings: t.Optional[t.Any] = None, daily_model_snapshot_retention_after_days: t.Optional[int] = None, datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, renormalization_window_days: t.Optional[int] = None, results_index_name: t.Optional[str] = None, results_retention_days: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an anomaly detection job.

If you include a `datafeed_config`, you must have read index privileges on the source index. If you include a `datafeed_config` but do not provide a query, the datafeed uses `{"match_all": {"boost": 1}}`.
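
A minimal job definition sketch (the `responsetime` and `timestamp` fields are placeholders for your own data)::

    await client.ml.put_job(
        job_id="my-job",
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "responsetime"}],
        },
        data_description={"time_field": "timestamp"},
    )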

``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param analysis_config: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. :param data_description: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. :param allow_lazy_open: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. :param allow_no_indices: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. :param background_persist_interval: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. :param custom_settings: Advanced configuration option. Contains custom meta data about the job. :param daily_model_snapshot_retention_after_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. :param datafeed_config: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. :param groups: A list of job groups. A job can belong to no groups or many. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen.
:param ignore_unavailable: If `true`, unavailable indices (missing or closed) are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. :param model_snapshot_retention_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. :param renormalization_window_days: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. :param results_index_name: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. :param results_retention_days: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. 
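
A minimal usage sketch (assumes an existing ``AsyncElasticsearch`` instance named ``client``; the job id, bucket span, and field names are hypothetical)::

    resp = await client.ml.put_job(
        job_id="my-job",
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "responsetime"}],
        },
        data_description={"time_field": "timestamp"},
    )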
""" if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if analysis_config is None and body is None: raise ValueError("Empty value passed for parameter 'analysis_config'") if data_description is None and body is None: raise ValueError("Empty value passed for parameter 'data_description'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if analysis_config is not None: __body["analysis_config"] = analysis_config if data_description is not None: __body["data_description"] = data_description if allow_lazy_open is not None: __body["allow_lazy_open"] = allow_lazy_open if analysis_limits is not None: __body["analysis_limits"] = analysis_limits if background_persist_interval is not None: __body["background_persist_interval"] = background_persist_interval if custom_settings is not None: __body["custom_settings"] = custom_settings if daily_model_snapshot_retention_after_days is not None: __body["daily_model_snapshot_retention_after_days"] = ( daily_model_snapshot_retention_after_days ) if datafeed_config is not None: __body["datafeed_config"] = datafeed_config if description is not None: __body["description"] = description if groups is not None: __body["groups"] = groups if model_plot_config is not None: __body["model_plot_config"] = model_plot_config if model_snapshot_retention_days is not None: __body["model_snapshot_retention_days"] = model_snapshot_retention_days if renormalization_window_days is not None: __body["renormalization_window_days"] = renormalization_window_days if results_index_name is not None: __body["results_index_name"] = results_index_name if results_retention_days is not None: __body["results_retention_days"] = results_retention_days __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "compressed_definition", "definition", "description", "inference_config", "input", "metadata", "model_size_bytes", "model_type", "platform_architecture", "prefix_strings", "tags", ), ) async def put_trained_model( self, *, model_id: str, compressed_definition: t.Optional[str] = None, defer_definition_decompression: t.Optional[bool] = None, definition: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, input: t.Optional[t.Mapping[str, t.Any]] = None, metadata: t.Optional[t.Any] = None, model_size_bytes: t.Optional[int] = None, model_type: t.Optional[ t.Union[str, t.Literal["lang_ident", 
"pytorch", "tree_ensemble"]] ] = None, platform_architecture: t.Optional[str] = None, prefix_strings: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, tags: t.Optional[t.Sequence[str]] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.

``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. :param defer_definition_decompression: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. :param definition: The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. :param description: A human-readable description of the inference trained model. :param inference_config: The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER, the config is not required. :param input: The input field names for the model definition. :param metadata: An object map that contains metadata about the model. :param model_size_bytes: The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. :param model_type: The model type. :param platform_architecture: The platform architecture (if applicable) of the trained model. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independently of processor architecture or OS features), leave this field unset. :param prefix_strings: Optional prefix strings applied at inference. :param tags: An array of tags to organize the model. :param wait_for_completion: Whether to wait for all child operations (e.g. model download) to complete.
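
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the model id is hypothetical, and per the description above, pre-packaged models such as ELSER do not require an ``inference_config``)::

    resp = await client.ml.put_trained_model(
        model_id=".elser_model_2",
        input={"field_names": ["text_field"]},
    )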
""" if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if defer_definition_decompression is not None: __query["defer_definition_decompression"] = defer_definition_decompression if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if compressed_definition is not None: __body["compressed_definition"] = compressed_definition if definition is not None: __body["definition"] = definition if description is not None: __body["description"] = description if inference_config is not None: __body["inference_config"] = inference_config if input is not None: __body["input"] = input if metadata is not None: __body["metadata"] = metadata if model_size_bytes is not None: __body["model_size_bytes"] = model_size_bytes if model_type is not None: __body["model_type"] = model_type if platform_architecture is not None: __body["platform_architecture"] = platform_architecture if prefix_strings is not None: __body["prefix_strings"] = prefix_strings if tags is not None: __body["tags"] = tags __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_trained_model", path_parts=__path_parts, ) @_rewrite_parameters() async def put_trained_model_alias( self, *, model_id: str, model_alias: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
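
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the model id and alias are hypothetical)::

    resp = await client.ml.put_trained_model_alias(
        model_id="my-trained-model",
        model_alias="my-alias",
        reassign=True,
    )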

``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. :param reassign: Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if model_alias in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_alias'") __path_parts: t.Dict[str, str] = { "model_id": _quote(model_id), "model_alias": _quote(model_alias), } __path = f'/_ml/trained_models/{__path_parts["model_id"]}/model_aliases/{__path_parts["model_alias"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if reassign is not None: __query["reassign"] = reassign __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="ml.put_trained_model_alias", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("definition", "total_definition_length", "total_parts"), ) async def put_trained_model_definition_part( self, *, model_id: str, part: int, definition: t.Optional[str] = None, total_definition_length: t.Optional[int] = None, total_parts: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create part of a trained model definition.
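
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``, and that ``parts`` is a hypothetical list of base64-encoded definition chunks whose total uncompressed size is ``total_length`` bytes)::

    for i, chunk in enumerate(parts):
        # Parts must be uploaded in order, starting at part 0.
        await client.ml.put_trained_model_definition_part(
            model_id="my-trained-model",
            part=i,
            definition=chunk,
            total_definition_length=total_length,
            total_parts=len(parts),
        )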

``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. :param definition: The definition part for the model. Must be a base64 encoded string. :param total_definition_length: The total uncompressed definition length in bytes. Not base64 encoded. :param total_parts: The total number of parts that will be uploaded. Must be greater than 0. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if part in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'part'") if definition is None and body is None: raise ValueError("Empty value passed for parameter 'definition'") if total_definition_length is None and body is None: raise ValueError( "Empty value passed for parameter 'total_definition_length'" ) if total_parts is None and body is None: raise ValueError("Empty value passed for parameter 'total_parts'") __path_parts: t.Dict[str, str] = { "model_id": _quote(model_id), "part": _quote(part), } __path = f'/_ml/trained_models/{__path_parts["model_id"]}/definition/{__path_parts["part"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if definition is not None: __body["definition"] = definition if total_definition_length is not None: __body["total_definition_length"] = total_definition_length if total_parts is not None: __body["total_parts"] = total_parts __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_trained_model_definition_part", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("vocabulary", "merges", "scores"), ) async def put_trained_model_vocabulary( self, *, model_id: str, vocabulary: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, merges: t.Optional[t.Sequence[str]] = None, pretty: t.Optional[bool] = None, scores: t.Optional[t.Sequence[float]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.
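
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the model id and vocabulary entries are hypothetical)::

    resp = await client.ml.put_trained_model_vocabulary(
        model_id="my-trained-model",
        vocabulary=["[PAD]", "[UNK]", "some", "words"],
    )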

``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. :param merges: The optional model merges if required by the tokenizer. :param scores: The optional vocabulary value scores if required by the tokenizer. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if vocabulary is None and body is None: raise ValueError("Empty value passed for parameter 'vocabulary'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/vocabulary' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if vocabulary is not None: __body["vocabulary"] = vocabulary if merges is not None: __body["merges"] = merges if scores is not None: __body["scores"] = scores __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_trained_model_vocabulary", path_parts=__path_parts, ) @_rewrite_parameters() async def reset_job( self, *, job_id: str, delete_user_annotations: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.
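
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the job id is hypothetical)::

    resp = await client.ml.reset_job(
        job_id="my-job",
        wait_for_completion=True,
    )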

``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. :param wait_for_completion: Should this request wait until the operation has completed before returning. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_reset' __query: t.Dict[str, t.Any] = {} if delete_user_annotations is not None: __query["delete_user_annotations"] = delete_user_annotations if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.reset_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("delete_intervening_results",), ) async def revert_model_snapshot( self, *, job_id: str, snapshot_id: str, delete_intervening_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. If this anomalous input is known to be a one-off, it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
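
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the job and snapshot ids are hypothetical)::

    resp = await client.ml.revert_model_snapshot(
        job_id="my-job",
        snapshot_id="1575402236",
        delete_intervening_results=True,
    )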

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the snapshot identifier. Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started. :param delete_intervening_results: Refer to the description for the `delete_intervening_results` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_revert' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if delete_intervening_results is not None: __body["delete_intervening_results"] = delete_intervening_results if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.revert_model_snapshot", path_parts=__path_parts, ) @_rewrite_parameters() async def set_upgrade_mode( self, *, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set upgrade_mode for ML indices. Sets a cluster-wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
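
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``)::

    # Halt ML tasks before the upgrade...
    await client.ml.set_upgrade_mode(enabled=True, timeout="10m")
    # ...and re-enable them once the upgrade is complete.
    await client.ml.set_upgrade_mode(enabled=False)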

``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. :param timeout: The time to wait for the request to be completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/set_upgrade_mode" __query: t.Dict[str, t.Any] = {} if enabled is not None: __query["enabled"] = enabled if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.set_upgrade_mode", path_parts=__path_parts, ) @_rewrite_parameters() async def start_data_frame_analytics( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The index.number_of_shards and index.number_of_replicas settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
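
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the job id is hypothetical)::

    resp = await client.ml.start_data_frame_analytics(
        id="my-analytics-job",
        timeout="1m",
    )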

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param timeout: Controls the amount of time to wait until the data frame analytics job starts. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_start' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.start_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("end", "start", "timeout"), ) async def start_datafeed( self, *, datafeed_id: str, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, start: t.Optional[t.Union[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start datafeeds.

A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.

Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.

If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored.

When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
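
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the datafeed id and start time are hypothetical)::

    resp = await client.ml.start_datafeed(
        datafeed_id="datafeed-my-job",
        start="2024-01-01T00:00:00Z",
    )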

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param end: Refer to the description for the `end` query parameter. :param start: Refer to the description for the `start` query parameter. :param timeout: Refer to the description for the `timeout` query parameter. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_start' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if end is not None: __body["end"] = end if start is not None: __body["start"] = start if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.start_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("adaptive_allocations",), ) async def start_trained_model_deployment( self, *, model_id: str, adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, cache_size: t.Optional[t.Union[int, str]] = None, deployment_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, number_of_allocations: t.Optional[int] = None, pretty: t.Optional[bool] = None, priority: t.Optional[t.Union[str, t.Literal["low", "normal"]]] = None, queue_capacity: t.Optional[int] = None, threads_per_allocation: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for: t.Optional[ t.Union[str, t.Literal["fully_allocated", "started", "starting"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a trained model deployment. It allocates the model to every machine learning node.

``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. :param adaptive_allocations: Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. :param cache_size: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. :param deployment_id: A unique identifier for the deployment of the model. :param number_of_allocations: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. :param priority: The deployment priority. :param queue_capacity: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. :param threads_per_allocation: Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. :param timeout: Specifies the amount of time to wait for the model to deploy. :param wait_for: Specifies the allocation status to wait for before returning. 
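
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the model id and sizing values are hypothetical)::

    resp = await client.ml.start_trained_model_deployment(
        model_id="my-trained-model",
        number_of_allocations=1,
        threads_per_allocation=2,
        wait_for="started",
    )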
""" if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_start' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cache_size is not None: __query["cache_size"] = cache_size if deployment_id is not None: __query["deployment_id"] = deployment_id if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if number_of_allocations is not None: __query["number_of_allocations"] = number_of_allocations if pretty is not None: __query["pretty"] = pretty if priority is not None: __query["priority"] = priority if queue_capacity is not None: __query["queue_capacity"] = queue_capacity if threads_per_allocation is not None: __query["threads_per_allocation"] = threads_per_allocation if timeout is not None: __query["timeout"] = timeout if wait_for is not None: __query["wait_for"] = wait_for if not __body: if adaptive_allocations is not None: __body["adaptive_allocations"] = adaptive_allocations if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.start_trained_model_deployment", path_parts=__path_parts, ) @_rewrite_parameters() async def stop_data_frame_analytics( self, *, id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. :param force: If true, the data frame analytics job is stopped forcefully. :param timeout: Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_stop' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.stop_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("allow_no_match", "force", "timeout"), ) async def stop_datafeed( self, *, datafeed_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
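
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the datafeed id is hypothetical)::

    resp = await client.ml.stop_datafeed(
        datafeed_id="datafeed-my-job",
        timeout="30s",
    )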

``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. :param allow_no_match: Refer to the description for the `allow_no_match` query parameter. :param force: Refer to the description for the `force` query parameter. :param timeout: Refer to the description for the `timeout` query parameter. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_stop' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_no_match is not None: __body["allow_no_match"] = allow_no_match if force is not None: __body["force"] = force if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.stop_datafeed", path_parts=__path_parts, ) @_rewrite_parameters() async def stop_trained_model_deployment( self, *, model_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop a trained model deployment.

``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. :param force: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_stop' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.stop_trained_model_deployment", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_lazy_start", "description", "max_num_threads", "model_memory_limit", ), ) async def update_data_frame_analytics( self, *, id: str, allow_lazy_start: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a data frame analytics job.

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param allow_lazy_start: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. :param description: A description of the job. :param max_num_threads: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_lazy_start is not None: __body["allow_lazy_start"] = allow_lazy_start if description is not None: __body["description"] = description if max_num_threads is not None: __body["max_num_threads"] = max_num_threads if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "chunking_config", "delayed_data_check_config", "frequency", "indexes", "indices", "indices_options", "job_id", "max_empty_searches", "query", "query_delay", "runtime_mappings", "script_fields", "scroll_size", ), ) async def update_datafeed( self, *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indexes: t.Optional[t.Sequence[str]] = None, indices: t.Optional[t.Sequence[str]] = None, indices_options: t.Optional[t.Mapping[str, t.Any]] = None, job_id: t.Optional[str] = None, max_empty_searches: t.Optional[int] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = 
None, query_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll_size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
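
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the datafeed id and query are hypothetical)::

    resp = await client.ml.update_datafeed(
        datafeed_id="datafeed-my-job",
        query={"term": {"environment": "production"}},
        scroll_size=1000,
    )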

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. :param chunking_config: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. :param delayed_data_check_config: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen. :param ignore_unavailable: If `true`, unavailable indices (missing or closed) are ignored. :param indexes: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. :param indices: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. :param indices_options: Specifies index expansion options that are used during search. :param job_id: :param max_empty_searches: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. :param query: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. 
Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. :param query_delay: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. :param runtime_mappings: Specifies runtime fields for the datafeed search. :param script_fields: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. :param scroll_size: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if aggregations is not None: __body["aggregations"] = aggregations if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: __body["delayed_data_check_config"] = delayed_data_check_config if frequency is not None: __body["frequency"] = frequency if indexes is not None: __body["indexes"] = indexes if indices is not None: __body["indices"] = indices if indices_options is not None: __body["indices_options"] = indices_options if job_id is not None: __body["job_id"] = job_id if max_empty_searches is not None: __body["max_empty_searches"] = max_empty_searches if query is not None: __body["query"] = query if query_delay is not None: __body["query_delay"] = query_delay if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if scroll_size is not None: __body["scroll_size"] = scroll_size __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("add_items", "description", "remove_items"), ) async def update_filter( self, *, filter_id: str, add_items: 
t.Optional[t.Sequence[str]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, remove_items: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a filter. Updates the description of a filter, adds items, or removes items from the list.
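
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the filter id and items are hypothetical)::

    resp = await client.ml.update_filter(
        filter_id="safe_domains",
        add_items=["*.elastic.co"],
        remove_items=["wikipedia.org"],
    )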

``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. :param description: A description for the filter. :param remove_items: The items to remove from the filter. """ if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'filter_id'") __path_parts: t.Dict[str, str] = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if add_items is not None: __body["add_items"] = add_items if description is not None: __body["description"] = description if remove_items is not None: __body["remove_items"] = remove_items __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_filter", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_lazy_open", "analysis_limits", "background_persist_interval", "categorization_filters", "custom_settings", "daily_model_snapshot_retention_after_days", "description", "detectors", "groups", "model_plot_config", "model_prune_window", "model_snapshot_retention_days", "per_partition_categorization", "renormalization_window_days", "results_retention_days", ), ) async def update_job( self, *, job_id: str, allow_lazy_open: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, categorization_filters: t.Optional[t.Sequence[str]] = None, custom_settings: t.Optional[t.Mapping[str, t.Any]] = None, daily_model_snapshot_retention_after_days: t.Optional[int] = None, description: t.Optional[str] = None, detectors: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_prune_window: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, model_snapshot_retention_days: t.Optional[int] = None, per_partition_categorization: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, renormalization_window_days: t.Optional[int] = None, results_retention_days: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update an anomaly detection job. Updates certain properties of an anomaly detection job.

``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. :param analysis_limits: :param background_persist_interval: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. :param categorization_filters: :param custom_settings: Advanced configuration option. Contains custom metadata about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. :param daily_model_snapshot_retention_after_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. :param description: A description of the job. :param detectors: An array of detector update objects. :param groups: A list of job groups. A job can belong to no groups or many. :param model_plot_config: :param model_prune_window: :param model_snapshot_retention_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. :param per_partition_categorization: Settings related to how categorization interacts with partition fields. :param renormalization_window_days: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. :param results_retention_days: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained.
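
A minimal usage sketch (assumes an ``AsyncElasticsearch`` instance named ``client``; the job id and values are hypothetical)::

    resp = await client.ml.update_job(
        job_id="my-job",
        description="Updated job description",
        results_retention_days=60,
    )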
""" if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_lazy_open is not None: __body["allow_lazy_open"] = allow_lazy_open if analysis_limits is not None: __body["analysis_limits"] = analysis_limits if background_persist_interval is not None: __body["background_persist_interval"] = background_persist_interval if categorization_filters is not None: __body["categorization_filters"] = categorization_filters if custom_settings is not None: __body["custom_settings"] = custom_settings if daily_model_snapshot_retention_after_days is not None: __body["daily_model_snapshot_retention_after_days"] = ( daily_model_snapshot_retention_after_days ) if description is not None: __body["description"] = description if detectors is not None: __body["detectors"] = detectors if groups is not None: __body["groups"] = groups if model_plot_config is not None: __body["model_plot_config"] = model_plot_config if model_prune_window is not None: __body["model_prune_window"] = model_prune_window if model_snapshot_retention_days is not None: __body["model_snapshot_retention_days"] = model_snapshot_retention_days if per_partition_categorization is not None: __body["per_partition_categorization"] = per_partition_categorization if renormalization_window_days is not None: __body["renormalization_window_days"] = renormalization_window_days if results_retention_days is not None: __body["results_retention_days"] = results_retention_days __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "retain"), ) async def update_model_snapshot( self, *, job_id: str, snapshot_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, retain: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a snapshot. Updates certain properties of a snapshot.
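
A minimal usage sketch (the IDs are placeholders; `client` is an assumed `AsyncElasticsearch` instance)::

    # Mark a snapshot as retained so automatic cleanup skips it
    resp = await client.ml.update_model_snapshot(
        job_id="low_request_rate",
        snapshot_id="1575402237",
        description="Snapshot before reconfiguration",
        retain=True,
    )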

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. :param description: A description of the model snapshot. :param retain: If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if retain is not None: __body["retain"] = retain __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_model_snapshot", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("adaptive_allocations", "number_of_allocations"), ) async def update_trained_model_deployment( self, *, model_id: str, adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, number_of_allocations: t.Optional[int] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a trained model deployment.
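
A minimal usage sketch (the model ID is a placeholder; `client` is an assumed `AsyncElasticsearch` instance). Per the parameter notes below, set `number_of_allocations` only when adaptive allocations are not enabled::

    resp = await client.ml.update_trained_model_deployment(
        model_id="my-trained-model",
        number_of_allocations=2,
    )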

``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. :param adaptive_allocations: Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. :param number_of_allocations: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if adaptive_allocations is not None: __body["adaptive_allocations"] = adaptive_allocations if number_of_allocations is not None: __body["number_of_allocations"] = number_of_allocations if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_trained_model_deployment", path_parts=__path_parts, ) @_rewrite_parameters() async def upgrade_job_snapshot( self, *, job_id: str, snapshot_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job.
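
A minimal usage sketch (the IDs are placeholders; `client` is an assumed `AsyncElasticsearch` instance)::

    # Block until the snapshot upgrade finishes or the timeout elapses
    resp = await client.ml.upgrade_job_snapshot(
        job_id="low_request_rate",
        snapshot_id="1575402237",
        wait_for_completion=True,
        timeout="30m",
    )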

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the model snapshot. :param timeout: Controls the time to wait for the request to complete. :param wait_for_completion: When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_upgrade' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.upgrade_job_snapshot", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis_config", "analysis_limits", "data_description", "description", "job_id", "model_plot", "model_snapshot_id", "model_snapshot_retention_days", "results_index_name", ), ) async def validate( self, *, analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_id: t.Optional[str] = None, model_plot: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_id: t.Optional[str] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, results_index_name: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Validate an anomaly detection job.
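
A hedged sketch of a validation call (the configuration shown is illustrative rather than a complete job definition; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.ml.validate(
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "responsetime"}],
        },
        data_description={"time_field": "timestamp"},
    )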

``_ :param analysis_config: :param analysis_limits: :param data_description: :param description: :param job_id: :param model_plot: :param model_snapshot_id: :param model_snapshot_retention_days: :param results_index_name: """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/anomaly_detectors/_validate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analysis_config is not None: __body["analysis_config"] = analysis_config if analysis_limits is not None: __body["analysis_limits"] = analysis_limits if data_description is not None: __body["data_description"] = data_description if description is not None: __body["description"] = description if job_id is not None: __body["job_id"] = job_id if model_plot is not None: __body["model_plot"] = model_plot if model_snapshot_id is not None: __body["model_snapshot_id"] = model_snapshot_id if model_snapshot_retention_days is not None: __body["model_snapshot_retention_days"] = model_snapshot_retention_days if results_index_name is not None: __body["results_index_name"] = results_index_name __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.validate", path_parts=__path_parts, ) @_rewrite_parameters( body_name="detector", ) async def validate_detector( self, *, detector: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Validate an anomaly detection job.
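
A minimal usage sketch (the detector shown is illustrative; `client` is an assumed `AsyncElasticsearch` instance). Note that `detector` is sent as the whole request body::

    resp = await client.ml.validate_detector(
        detector={"function": "mean", "field_name": "responsetime"},
    )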

``_ :param detector: """ if detector is None and body is None: raise ValueError( "Empty value passed for parameters 'detector' and 'body', one of them should be set." ) elif detector is not None and body is not None: raise ValueError("Cannot set both 'detector' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_ml/anomaly_detectors/_validate/detector" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = detector if detector is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.validate_detector", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/monitoring.py000066400000000000000000000074111506101734100257570ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class MonitoringClient(NamespacedClient): @_rewrite_parameters( body_name="operations", ) async def bulk( self, *, interval: t.Union[str, t.Literal[-1], t.Literal[0]], operations: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, system_api_version: str, system_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Send monitoring data. This API is used by the monitoring features to send monitoring data.
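
A hedged sketch only: the `operations` payload below is a placeholder shape, not a documented schema, and `client` is an assumed `AsyncElasticsearch` instance. This endpoint is intended for the monitoring features themselves rather than typical applications::

    resp = await client.monitoring.bulk(
        system_id="logstash",
        system_api_version="7",
        interval="10s",
        operations=[
            {"index": {}},           # action line, as in the bulk NDJSON format
            {"example_metric": 42},  # document line (illustrative)
        ],
    )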

``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: :param system_api_version: :param system_id: Identifier of the monitored system """ if interval is None: raise ValueError("Empty value passed for parameter 'interval'") if operations is None and body is None: raise ValueError( "Empty value passed for parameters 'operations' and 'body', one of them should be set." ) elif operations is not None and body is not None: raise ValueError("Cannot set both 'operations' and 'body'") if system_api_version is None: raise ValueError("Empty value passed for parameter 'system_api_version'") if system_id is None: raise ValueError("Empty value passed for parameter 'system_id'") __path_parts: t.Dict[str, str] = {} __path = "/_monitoring/bulk" __query: t.Dict[str, t.Any] = {} if interval is not None: __query["interval"] = interval if system_api_version is not None: __query["system_api_version"] = system_api_version if system_id is not None: __query["system_id"] = system_id if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = operations if operations is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="monitoring.bulk", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/nodes.py000066400000000000000000000567361506101734100247200ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class NodesClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def clear_repositories_metering_archive( self, *, node_id: t.Union[str, t.Sequence[str]], max_archive_version: int, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the archived repositories metering. Clear the archived repositories metering information in the cluster.
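
A minimal usage sketch (the node ID and version are placeholders; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.nodes.clear_repositories_metering_archive(
        node_id="node-1",
        max_archive_version=100,
    )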

``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. :param max_archive_version: Specifies the maximum `archive_version` to be cleared from the archive. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") if max_archive_version in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'max_archive_version'") __path_parts: t.Dict[str, str] = { "node_id": _quote(node_id), "max_archive_version": _quote(max_archive_version), } __path = f'/_nodes/{__path_parts["node_id"]}/_repositories_metering/{__path_parts["max_archive_version"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="nodes.clear_repositories_metering_archive", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_repositories_metering_info( self, *, node_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters and it is expected that clients would durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.
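
A minimal usage sketch (the node ID is a placeholder; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.nodes.get_repositories_metering_info(node_id="node-1")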

``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") __path_parts: t.Dict[str, str] = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/_repositories_metering' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.get_repositories_metering_info", path_parts=__path_parts, ) @_rewrite_parameters() async def hot_threads( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_idle_threads: t.Optional[bool] = None, interval: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, snapshots: t.Optional[int] = None, sort: t.Optional[ t.Union[str, t.Literal["block", "cpu", "gpu", "mem", "wait"]] ] = None, threads: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, type: t.Optional[ t.Union[str, t.Literal["block", "cpu", "gpu", "mem", "wait"]] ] = None, ) -> TextApiResponse: """ .. raw:: html

Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.
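
A minimal usage sketch (`client` is an assumed `AsyncElasticsearch` instance); note that the response is plain text rather than JSON::

    resp = await client.nodes.hot_threads(
        threads=3,
        interval="500ms",
        type="cpu",
        ignore_idle_threads=True,
    )
    print(resp.body)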

``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. :param interval: The interval to do the second sampling of threads. :param snapshots: Number of samples of thread stacktrace. :param sort: The sort order for 'cpu' type (default: total) :param threads: Specifies the number of hot threads to provide information for. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param type: The type to sample. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/hot_threads' else: __path_parts = {} __path = "/_nodes/hot_threads" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_idle_threads is not None: __query["ignore_idle_threads"] = ignore_idle_threads if interval is not None: __query["interval"] = interval if pretty is not None: __query["pretty"] = pretty if snapshots is not None: __query["snapshots"] = snapshots if sort is not None: __query["sort"] = sort if threads is not None: __query["threads"] = threads if timeout is not None: __query["timeout"] = timeout if type is not None: __query["type"] = type __headers = {"accept": "text/plain"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.hot_threads", path_parts=__path_parts, ) @_rewrite_parameters() async def info( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get node information.

By default, the API returns all attributes and core settings for cluster nodes.
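
A minimal usage sketch (`client` is an assumed `AsyncElasticsearch` instance)::

    # Restrict the output to the local node and two metric groups
    resp = await client.nodes.info(node_id="_local", metric="http,ingest")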

``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. :param metric: Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. :param flat_settings: If true, returns settings in flat format. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id), "metric": _quote(metric)} __path = f'/_nodes/{__path_parts["node_id"]}/{__path_parts["metric"]}' elif node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_nodes/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_nodes" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("secure_settings_password",), ) async def reload_secure_settings( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, secure_settings_password: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reload the keystore on nodes in the cluster.

Secure settings are stored in an on-disk keystore. Certain of these settings are reloadable. That is, you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.

When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.
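
A minimal usage sketch (the password is a placeholder; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.nodes.reload_secure_settings(
        secure_settings_password="keystore-password",
    )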

``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/reload_secure_settings' else: __path_parts = {} __path = "/_nodes/reload_secure_settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if secure_settings_password is not None: __body["secure_settings_password"] = secure_settings_password if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="nodes.reload_secure_settings", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, index_metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[bool] = None, human: t.Optional[bool] = None, include_segment_file_sizes: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.
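
A minimal usage sketch (`client` is an assumed `AsyncElasticsearch` instance)::

    # Only indices stats, aggregated at the shard level
    resp = await client.nodes.stats(metric="indices", level="shards")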

``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. :param metric: Limit the information returned to the specified metrics :param index_metric: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. :param completion_fields: Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. :param fielddata_fields: Comma-separated list or wildcard expressions of fields to include in fielddata statistics. :param fields: Comma-separated list or wildcard expressions of fields to include in the statistics. :param groups: Comma-separated list of search groups to include in the search statistics. :param include_segment_file_sizes: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). :param include_unloaded_segments: If `true`, the response includes information from segments that are not loaded into memory. :param level: Indicates whether statistics are aggregated at the cluster, index, or shard level. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param types: A comma-separated list of document types for the indexing index metric. """ __path_parts: t.Dict[str, str] if ( node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH and index_metric not in SKIP_IN_PATH ): __path_parts = { "node_id": _quote(node_id), "metric": _quote(metric), "index_metric": _quote(index_metric), } __path = f'/_nodes/{__path_parts["node_id"]}/stats/{__path_parts["metric"]}/{__path_parts["index_metric"]}' elif node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id), "metric": _quote(metric)} __path = f'/_nodes/{__path_parts["node_id"]}/stats/{__path_parts["metric"]}' elif metric not in SKIP_IN_PATH and index_metric not in SKIP_IN_PATH: __path_parts = { "metric": _quote(metric), "index_metric": _quote(index_metric), } __path = ( f'/_nodes/stats/{__path_parts["metric"]}/{__path_parts["index_metric"]}' ) elif node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/stats' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_nodes/stats/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_nodes/stats" __query: t.Dict[str, t.Any] = {} if completion_fields is not None: __query["completion_fields"] = completion_fields if error_trace is not None: __query["error_trace"] = error_trace if fielddata_fields is not None: __query["fielddata_fields"] = fielddata_fields if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if groups is not None: __query["groups"] = groups if human is not None: __query["human"] = human if include_segment_file_sizes is not None: __query["include_segment_file_sizes"] = include_segment_file_sizes if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if level is not None: __query["level"] = level if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if types is not None: __query["types"] = types __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, 
endpoint_id="nodes.stats", path_parts=__path_parts, ) @_rewrite_parameters() async def usage( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get feature usage information.
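
A minimal usage sketch (`client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.nodes.usage(metric="rest_actions")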

``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes :param metric: Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id), "metric": _quote(metric)} __path = f'/_nodes/{__path_parts["node_id"]}/usage/{__path_parts["metric"]}' elif node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/usage' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_nodes/usage/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_nodes/usage" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.usage", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/query_rules.py000066400000000000000000000457471506101734100261670ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class QueryRulesClient(NamespacedClient): @_rewrite_parameters() async def delete_rule( self, *, ruleset_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.
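
A minimal usage sketch (the IDs are placeholders; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.query_rules.delete_rule(
        ruleset_id="my-ruleset",
        rule_id="my-rule",
    )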

``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete :param rule_id: The unique identifier of the query rule within the specified ruleset to delete """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "ruleset_id": _quote(ruleset_id), "rule_id": _quote(rule_id), } __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_rule/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="query_rules.delete_rule", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_ruleset( self, *, ruleset_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable.

``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="query_rules.delete_ruleset", path_parts=__path_parts, ) @_rewrite_parameters() async def get_rule( self, *, ruleset_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a query rule. Get details about a query rule within a query ruleset.

``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve :param rule_id: The unique identifier of the query rule within the specified ruleset to retrieve """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "ruleset_id": _quote(ruleset_id), "rule_id": _quote(rule_id), } __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_rule/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="query_rules.get_rule", path_parts=__path_parts, ) @_rewrite_parameters() async def get_ruleset( self, *, ruleset_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a query ruleset. Get details about a query ruleset.

``_ :param ruleset_id: The unique identifier of the query ruleset """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="query_rules.get_ruleset", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def list_rulesets( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all query rulesets. Get summarized information about the query rulesets.
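
A minimal usage sketch (`client` is an assumed `AsyncElasticsearch` instance); note that the `from` query parameter is exposed as `from_` in Python::

    resp = await client.query_rules.list_rulesets(from_=0, size=20)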

``_ :param from_: The offset from the first result to fetch. :param size: The maximum number of results to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_query_rules" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="query_rules.list_rulesets", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("actions", "criteria", "type", "priority"), ) async def put_rule( self, *, ruleset_id: str, rule_id: str, actions: t.Optional[t.Mapping[str, t.Any]] = None, criteria: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, type: t.Optional[t.Union[str, t.Literal["exclude", "pinned"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, priority: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a query rule. Create or update a query rule within a query ruleset.

IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
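
A minimal usage sketch (the IDs, criteria, and document IDs are illustrative; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.query_rules.put_rule(
        ruleset_id="my-ruleset",
        rule_id="my-rule",
        type="pinned",
        criteria=[
            {"type": "contains", "metadata": "user_query", "values": ["pugs"]},
        ],
        actions={"ids": ["doc-1", "doc-2"]},  # pin by ids only, per the note above
    )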

``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated. :param rule_id: The unique identifier of the query rule within the specified ruleset to be created or updated. :param actions: The actions to take when the rule is matched. The format of this action depends on the rule type. :param criteria: The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. :param type: The type of rule. :param priority: """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") if actions is None and body is None: raise ValueError("Empty value passed for parameter 'actions'") if criteria is None and body is None: raise ValueError("Empty value passed for parameter 'criteria'") if type is None and body is None: raise ValueError("Empty value passed for parameter 'type'") __path_parts: t.Dict[str, str] = { "ruleset_id": _quote(ruleset_id), "rule_id": _quote(rule_id), } __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_rule/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if actions is not None: __body["actions"] = actions if criteria is not None: __body["criteria"] = criteria if type is not None: __body["type"] = type if priority is not None: __body["priority"] = priority __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="query_rules.put_rule", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("rules",), ) async def put_ruleset( self, *, ruleset_id: str, rules: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the xpack.applications.rules.max_rules_per_ruleset cluster setting.

IMPORTANT: Due to limitations within pinned queries, you can only select documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned in the order they are specified in the ruleset.
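
A minimal usage sketch (the IDs and rule content are illustrative; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.query_rules.put_ruleset(
        ruleset_id="my-ruleset",
        rules=[
            {
                "rule_id": "my-rule",
                "type": "pinned",
                "criteria": [
                    {"type": "exact", "metadata": "user_query", "values": ["marvel"]},
                ],
                "actions": {"ids": ["doc-1"]},
            },
        ],
    )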

``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated. :param rules: """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rules is None and body is None: raise ValueError("Empty value passed for parameter 'rules'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if rules is not None: __body["rules"] = rules __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="query_rules.put_ruleset", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("match_criteria",), ) async def test( self, *, ruleset_id: str, match_criteria: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.
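
A minimal usage sketch (the criteria keys must match those defined in the ruleset's rules; the values here are illustrative, and `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.query_rules.test(
        ruleset_id="my-ruleset",
        match_criteria={"user_query": "pugs"},
    )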

``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated :param match_criteria: The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if match_criteria is None and body is None: raise ValueError("Empty value passed for parameter 'match_criteria'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_test' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if match_criteria is not None: __body["match_criteria"] = match_criteria __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="query_rules.test", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/rollup.py000066400000000000000000000666411506101734100251210ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class RollupClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def delete_job( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a rollup job.

A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs.

IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example:

POST my_rollup_index/_delete_by_query
          {
            "query": {
              "term": {
                "_rollup.id": "the_rollup_job_id"
              }
            }
          }
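
The same cleanup can be issued through this client; a hedged sketch reusing the names from the example above::

    resp = await client.rollup.delete_job(id="the_rollup_job_id")
    # Optionally remove the rolled up documents for that job as well
    await client.delete_by_query(
        index="my_rollup_index",
        query={"term": {"_rollup.id": "the_rollup_job_id"}},
    )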
          
``_ :param id: Identifier for the job. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="rollup.delete_job", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_jobs( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get rollup job information. Get the configuration, stats, and status of rollup jobs.

NOTE: This API returns only active (both STARTED and STOPPED) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful.
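
A minimal usage sketch (`client` is an assumed `AsyncElasticsearch` instance)::

    # Fetch all active rollup jobs; pass a specific ID to narrow the result
    resp = await client.rollup.get_jobs(id="_all")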

``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}' else: __path_parts = {} __path = "/_rollup/job" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="rollup.get_jobs", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_rollup_caps( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

This API is useful because a rollup job is often configured to roll up only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine (a usage sketch follows this list):

  1. Does this index have associated rollup data somewhere in the cluster?
  2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
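
A minimal usage sketch (the index pattern is illustrative; `client` is an assumed `AsyncElasticsearch` instance)::

    resp = await client.rollup.get_rollup_caps(id="sensor-*")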
``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_rollup/data/{__path_parts["id"]}' else: __path_parts = {} __path = "/_rollup/data" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="rollup.get_rollup_caps", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_rollup_index_caps( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the rollup index capabilities. Get the rollup capabilities of all jobs inside a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:

  • What jobs are stored in an index (or indices specified via a pattern)?
  • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
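
A minimal sketch of answering those questions for one rollup index; `sensor_rollup` is a hypothetical index name and the endpoint URL is assumed:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def show_index_caps() -> None:
    # Wildcard expressions such as "*_rollup" are also accepted.
    resp = await client.rollup.get_rollup_index_caps(index="sensor_rollup")
    print(resp.body)
```
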
``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_rollup/data' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="rollup.get_rollup_index_caps", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "cron", "groups", "index_pattern", "page_size", "rollup_index", "headers", "metrics", "timeout", ), ignore_deprecated_options={"headers"}, ) @_stability_warning(Stability.EXPERIMENTAL) async def put_job( self, *, id: str, cron: t.Optional[str] = None, groups: t.Optional[t.Mapping[str, t.Any]] = None, index_pattern: t.Optional[str] = None, page_size: t.Optional[int] = None, rollup_index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, metrics: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a rollup job.

WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.

The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.

There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.

Jobs are created in a STOPPED state. You can start them with the start rollup jobs API.
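
To tie the three sections together, a sketch of one possible job configuration; every name (job ID, indices, fields) is a placeholder, and the cron fires at the top of each hour:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def create_rollup_job() -> None:
    await client.rollup.put_job(
        id="sensor",                   # hypothetical job ID
        index_pattern="sensor-*",      # logistics: what to roll up...
        rollup_index="sensor_rollup",  # ...and where to store the results
        cron="0 0 * * * ?",            # when the indexer runs
        page_size=1000,
        groups={  # grouping fields
            "date_histogram": {"field": "timestamp", "fixed_interval": "1h"}
        },
        metrics=[  # metrics collected per group
            {"field": "temperature", "metrics": ["min", "max", "avg"]}
        ],
    )
    await client.rollup.start_job(id="sensor")  # jobs begin STOPPED
```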

``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. :param cron: A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to roll up the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule. :param groups: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. :param index_pattern: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to roll up the entire index or index-pattern. :param page_size: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. :param rollup_index: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. :param headers: :param metrics: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. :param timeout: Time to wait for the request to complete.
""" if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if cron is None and body is None: raise ValueError("Empty value passed for parameter 'cron'") if groups is None and body is None: raise ValueError("Empty value passed for parameter 'groups'") if index_pattern is None and body is None: raise ValueError("Empty value passed for parameter 'index_pattern'") if page_size is None and body is None: raise ValueError("Empty value passed for parameter 'page_size'") if rollup_index is None and body is None: raise ValueError("Empty value passed for parameter 'rollup_index'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if cron is not None: __body["cron"] = cron if groups is not None: __body["groups"] = groups if index_pattern is not None: __body["index_pattern"] = index_pattern if page_size is not None: __body["page_size"] = page_size if rollup_index is not None: __body["rollup_index"] = rollup_index if headers is not None: __body["headers"] = headers if metrics is not None: __body["metrics"] = metrics if timeout is not None: __body["timeout"] = timeout __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="rollup.put_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aggregations", "aggs", "query", "size"), ) @_stability_warning(Stability.EXPERIMENTAL) async def rollup_search( self, *, index: t.Union[str, t.Sequence[str]], aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, rest_total_hits_as_int: t.Optional[bool] = None, size: t.Optional[int] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.

The request body supports a subset of features from the regular search API. The following functionality is not available:

  • size: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
  • highlighter, suggestors, post_filter, profile, explain: These are similarly disallowed.

For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.
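
As a sketch of the size restriction in practice (the index name and field are assumptions):

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def search_rollup() -> None:
    resp = await client.rollup.rollup_search(
        index="sensor_rollup",  # hypothetical rollup index
        size=0,                 # rollups return no hits, so size must be 0
        aggregations={"max_temperature": {"max": {"field": "temperature"}}},
    )
    print(resp.body["aggregations"]["max_temperature"])
```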

``_ :param index: A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. :param aggregations: Specifies aggregations. :param aggs: Specifies aggregations. :param query: Specifies a DSL query that is subject to some limitations. :param rest_total_hits_as_int: Indicates whether hits.total should be rendered as an integer or an object in the rest search response :param size: Must be zero if set, as rollups work on pre-aggregated data. :param typed_keys: Specify whether aggregation and suggester names should be prefixed by their respective types in the response """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_rollup_search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if query is not None: __body["query"] = query if size is not None: __body["size"] = size __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="rollup.rollup_search", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def start_job( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens.

``_ :param id: Identifier for the rollup job. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}/_start' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="rollup.start_job", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def stop_job( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens.

Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the wait_for_completion query parameter, and optionally a timeout. For example:

POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s

The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.
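
The client-side equivalent of the request above might look like this sketch (the `sensor` job ID comes from the example; the endpoint URL is assumed):

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def stop_rollup_job() -> None:
    # Blocks for at most 10 seconds while the indexer winds down.
    await client.rollup.stop_job(
        id="sensor", wait_for_completion=True, timeout="10s"
    )
```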

``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change. :param wait_for_completion: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}/_stop' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="rollup.stop_job", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/search_application.py000066400000000000000000000532031506101734100274220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SearchApplicationClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.BETA) async def delete( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a search application.

Remove a search application and its associated alias. Indices attached to the search application are not removed.

``_ :param name: The name of the search application to delete. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="search_application.delete", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def delete_behavioral_analytics( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a behavioral analytics collection. The associated data stream is also deleted.

``_ :param name: The name of the analytics collection to be deleted """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/analytics/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="search_application.delete_behavioral_analytics", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) async def get( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get search application details.

``_ :param name: The name of the search application """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="search_application.get", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get_behavioral_analytics( self, *, name: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get behavioral analytics collections.

``_ :param name: A list of analytics collections to limit the returned information """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_application/analytics/{__path_parts["name"]}' else: __path_parts = {} __path = "/_application/analytics" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="search_application.get_behavioral_analytics", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) @_stability_warning(Stability.BETA) async def list( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get search applications. Get information about search applications.

``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. :param size: Specifies a max number of results to get. """ __path_parts: t.Dict[str, str] = {} __path = "/_application/search_application" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="search_application.list", path_parts=__path_parts, ) @_rewrite_parameters( body_name="payload", ) @_stability_warning(Stability.EXPERIMENTAL) async def post_behavioral_analytics_event( self, *, collection_name: str, event_type: t.Union[str, t.Literal["page_view", "search", "search_click"]], payload: t.Optional[t.Any] = None, body: t.Optional[t.Any] = None, debug: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a behavioral analytics collection event.

``_ :param collection_name: The name of the behavioral analytics collection. :param event_type: The analytics event type. :param payload: :param debug: Whether the response type has to include more details """ if collection_name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'collection_name'") if event_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'event_type'") if payload is None and body is None: raise ValueError( "Empty value passed for parameters 'payload' and 'body', one of them should be set." ) elif payload is not None and body is not None: raise ValueError("Cannot set both 'payload' and 'body'") __path_parts: t.Dict[str, str] = { "collection_name": _quote(collection_name), "event_type": _quote(event_type), } __path = f'/_application/analytics/{__path_parts["collection_name"]}/event/{__path_parts["event_type"]}' __query: t.Dict[str, t.Any] = {} if debug is not None: __query["debug"] = debug if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = payload if payload is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.post_behavioral_analytics_event", path_parts=__path_parts, ) @_rewrite_parameters( body_name="search_application", ) @_stability_warning(Stability.BETA) async def put( self, *, name: str, search_application: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a search application.

``_ :param name: The name of the search application to be created or updated. :param search_application: :param create: If `true`, this request cannot replace or update existing Search Applications. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if search_application is None and body is None: raise ValueError( "Empty value passed for parameters 'search_application' and 'body', one of them should be set." ) elif search_application is not None and body is not None: raise ValueError("Cannot set both 'search_application' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = search_application if search_application is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.put", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def put_behavioral_analytics( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a behavioral analytics collection.

``_ :param name: The name of the analytics collection to be created or updated. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/analytics/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="search_application.put_behavioral_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, ) @_stability_warning(Stability.EXPERIMENTAL) async def render_query( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in params, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

You must have read privileges on the backing alias of the search application.
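
For illustration, a sketch of rendering a query; the application name and the `query_string` template parameter are assumptions about your search template:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def render_app_query() -> None:
    resp = await client.search_application.render_query(
        name="my-app", params={"query_string": "kayaking"}
    )
    print(resp.body)  # the Elasticsearch query the template would run
```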

``_ :param name: The name of the search application to render the query for. :param params: """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = ( f'/_application/search_application/{__path_parts["name"]}/_render_query' ) __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if params is not None: __body["params"] = params if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.render_query", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, ) @_stability_warning(Stability.BETA) async def search( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
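
A minimal sketch, assuming an application named `my-app` whose template accepts a `query_string` parameter:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def search_app() -> None:
    resp = await client.search_application.search(
        name="my-app", params={"query_string": "kayaking"}
    )
    print(resp.body["hits"]["total"])
```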

``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override any defaults specified in the template. :param typed_keys: Determines whether aggregation names are prefixed by their respective types in the response. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}/_search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if params is not None: __body["params"] = params if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.search", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/searchable_snapshots.py000066400000000000000000000306421506101734100277670ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SearchableSnapshotsClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def cache_stats( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cache statistics. Get statistics about the shared cache for partially mounted indices.

``_ :param node_id: The names of the nodes in the cluster to target. :param master_timeout: """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_searchable_snapshots/{__path_parts["node_id"]}/cache/stats' else: __path_parts = {} __path = "/_searchable_snapshots/cache/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="searchable_snapshots.cache_stats", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def clear_cache( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices.

``_ :param index: A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_searchable_snapshots/cache/clear' else: __path_parts = {} __path = "/_searchable_snapshots/cache/clear" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="searchable_snapshots.clear_cache", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "index", "ignore_index_settings", "index_settings", "renamed_index", ), ) async def mount( self, *, repository: str, snapshot: str, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_index_settings: t.Optional[t.Sequence[str]] = None, index_settings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, renamed_index: t.Optional[str] = None, storage: t.Optional[str] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes.
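
A sketch of mounting an index from a snapshot; the repository, snapshot, and index names are placeholders:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def mount_index() -> None:
    await client.searchable_snapshots.mount(
        repository="my_repository",
        snapshot="my_snapshot",
        index="my_index",                  # index inside the snapshot
        renamed_index="my_index_mounted",  # optional new name
        wait_for_completion=True,
    )
```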

``_ :param repository: The name of the repository containing the snapshot of the index to mount. :param snapshot: The name of the snapshot of the index to mount. :param index: The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. :param ignore_index_settings: The names of settings that should be removed from the index when it is mounted. :param index_settings: The settings that should be added to the index when it is mounted. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param renamed_index: The name of the index that will be created. :param storage: The mount option for the searchable snapshot index. :param wait_for_completion: If true, the request blocks until the operation is complete. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") if index is None and body is None: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = ( f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_mount' ) __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if storage is not None: __query["storage"] = storage if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if index is not None: __body["index"] = index if ignore_index_settings is not None: __body["ignore_index_settings"] = ignore_index_settings if index_settings is not None: __body["index_settings"] = index_settings if renamed_index is not None: __body["renamed_index"] = renamed_index __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="searchable_snapshots.mount", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get searchable snapshot statistics.

``_ :param index: A comma-separated list of data streams and indices to retrieve statistics for. :param level: Return stats aggregated at cluster, index or shard level """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_searchable_snapshots/stats' else: __path_parts = {} __path = "/_searchable_snapshots/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if level is not None: __query["level"] = level if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="searchable_snapshots.stats", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/security.py000066400000000000000000006623701506101734100254540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SecurityClient(NamespacedClient): @_rewrite_parameters( body_fields=("grant_type", "access_token", "password", "username"), ) async def activate_user_profile( self, *, grant_type: t.Optional[ t.Union[str, t.Literal["access_token", "password"]] ] = None, access_token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Activate a user profile.

Create or update a user profile on behalf of another user.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an access_token or a combination of username and password for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice.

This API creates or updates a profile document for end users with information that is extracted from the user's authentication object, including username, full_name, roles, and the authentication realm. For example, in the JWT access_token case, the profile user's username is extracted from the JWT token claim pointed to by the claims.principal setting of the JWT realm that authenticated the token.

When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the labels or data fields.
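
A sketch of the password grant flow; the username and password are placeholders for the profiled user's credentials:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def activate_profile() -> None:
    resp = await client.security.activate_user_profile(
        grant_type="password",
        username="jacknich",             # placeholder credentials
        password="l0ng-r4nd0m-p@ssw0rd",
    )
    print(resp.body["uid"])  # the profile's unique ID
```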

``_ :param grant_type: The type of grant. :param access_token: The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. :param username: The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. """ if grant_type is None and body is None: raise ValueError("Empty value passed for parameter 'grant_type'") __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_activate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if grant_type is not None: __body["grant_type"] = grant_type if access_token is not None: __body["access_token"] = access_token if password is not None: __body["password"] = password if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.activate_user_profile", path_parts=__path_parts, ) @_rewrite_parameters() async def authenticate( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Authenticate a user.

Authenticates a user and returns information about the authenticated user. Include the user information in a basic auth header. A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code.
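
For example, a sketch that authenticates with placeholder basic-auth credentials and prints who the cluster thinks you are:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch(
    "http://localhost:9200",             # assumed endpoint
    basic_auth=("elastic", "changeme"),  # placeholder credentials
)

async def whoami() -> None:
    resp = await client.security.authenticate()
    print(resp.body["username"], resp.body["roles"])
```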

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.authenticate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("names",), ) async def bulk_delete_role( self, *, names: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk delete roles.

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files.
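
A minimal sketch; the role names are placeholders:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def delete_roles() -> None:
    resp = await client.security.bulk_delete_role(
        names=["my-role-1", "my-role-2"]
    )
    print(resp.body)  # reports which roles were deleted, missing, or errored
```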

``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if names is None and body is None: raise ValueError("Empty value passed for parameter 'names'") __path_parts: t.Dict[str, str] = {} __path = "/_security/role" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if names is not None: __body["names"] = names __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.bulk_delete_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("roles",), ) async def bulk_put_role( self, *, roles: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk create or update roles.

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files.
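
A sketch of creating two roles in one call; the role names and privileges are placeholders:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def upsert_roles() -> None:
    await client.security.bulk_put_role(
        roles={
            "logs-reader": {
                "indices": [{"names": ["logs-*"], "privileges": ["read"]}]
            },
            "cluster-monitor": {"cluster": ["monitor"]},
        }
    )
```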

``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if roles is None and body is None: raise ValueError("Empty value passed for parameter 'roles'") __path_parts: t.Dict[str, str] = {} __path = "/_security/role" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if roles is not None: __body["roles"] = roles __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.bulk_put_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("ids", "expiration", "metadata", "role_descriptors"), ) async def bulk_update_api_keys( self, *, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk update API keys. Update the attributes for multiple API keys.

IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.

This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.

It is not possible to update expired or invalidated API keys.

This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call.

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.

A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.
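
A sketch of such a bulk update; the key IDs are placeholders and, per the note above, the request must not itself be authenticated with an API key:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def update_keys() -> None:
    resp = await client.security.bulk_update_api_keys(
        ids=["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],  # placeholder IDs
        metadata={"environment": "production"},
    )
    print(resp.body["updated"], resp.body["noops"])
```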

``_ :param ids: The API key identifiers. :param expiration: Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. :param metadata: Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. :param role_descriptors: The role descriptors to assign to the API keys. An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. """ if ids is None and body is None: raise ValueError("Empty value passed for parameter 'ids'") __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key/_bulk_update" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if ids is not None: __body["ids"] = ids if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata if role_descriptors is not None: __body["role_descriptors"] = role_descriptors __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.bulk_update_api_keys", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("password", "password_hash"), ) async def change_password( self, *, username: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, password: t.Optional[str] = None, password_hash: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Change passwords.

Change the passwords of users in the native realm and built-in users.
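
A sketch covering both cases described above; the credentials are placeholders:

```python
from elasticsearch import AsyncElasticsearch

client = AsyncElasticsearch("http://localhost:9200")  # assumed endpoint

async def rotate_password() -> None:
    # Omit `username` to change the password of the calling user instead.
    await client.security.change_password(
        username="jacknich", password="new-l0ng-r4nd0m-p@ssw0rd"
    )
```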

``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. :param password: The new password value. Passwords must be at least 6 characters long. :param password_hash: A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ __path_parts: t.Dict[str, str] if username not in SKIP_IN_PATH: __path_parts = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}/_password' else: __path_parts = {} __path = "/_security/user/_password" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if password is not None: __body["password"] = password if password_hash is not None: __body["password_hash"] = password_hash __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.change_password", path_parts=__path_parts, ) @_rewrite_parameters() async def clear_api_key_cache( self, *, ids: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the API key cache.

Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index.
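
A short sketch of evicting specific keys, assuming `client` is an already-connected `AsyncElasticsearch` instance awaited inside a coroutine (as in the change_password sketch above); the key ID is a placeholder:

    # Evict one API key from the cache; pass ids="*" to evict all keys.
    resp = await client.security.clear_api_key_cache(ids="VuaCfGcBCdbkQm-e5aOx")
    print(resp)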

``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. """ if ids in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ids'") __path_parts: t.Dict[str, str] = {"ids": _quote(ids)} __path = f'/_security/api_key/{__path_parts["ids"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_api_key_cache", path_parts=__path_parts, ) @_rewrite_parameters() async def clear_cached_privileges( self, *, application: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the privileges cache.

Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated.

``_ :param application: A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. """ if application in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'application'") __path_parts: t.Dict[str, str] = {"application": _quote(application)} __path = f'/_security/privilege/{__path_parts["application"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_privileges", path_parts=__path_parts, ) @_rewrite_parameters() async def clear_cached_realms( self, *, realms: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the user cache.

Evict users from the user cache. You can completely clear the cache or evict specific users.

User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache.
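
A hedged sketch, reusing the assumed `client` coroutine setup from the change_password example; the realm and user names are placeholders:

    # Evict two users from the cache of a hypothetical "default_native" realm.
    resp = await client.security.clear_cached_realms(
        realms="default_native", usernames=["user1", "user2"]
    )
    print(resp)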

``_ :param realms: A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. :param usernames: A comma-separated list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. """ if realms in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'realms'") __path_parts: t.Dict[str, str] = {"realms": _quote(realms)} __path = f'/_security/realm/{__path_parts["realms"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if usernames is not None: __query["usernames"] = usernames __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_realms", path_parts=__path_parts, ) @_rewrite_parameters() async def clear_cached_roles( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the roles cache.

Evict roles from the native role cache.

``_ :param name: A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_roles", path_parts=__path_parts, ) @_rewrite_parameters() async def clear_cached_service_tokens( self, *, namespace: str, service: str, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear service account token caches.

Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the service_tokens file, and another for tokens backed by the .security index. This API clears matching entries from both caches.

The cache for service account tokens backed by the .security index is cleared automatically on state changes of the security index. The cache for tokens backed by the service_tokens file is cleared automatically on file changes.
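
A sketch under the same assumed `client` setup; `elastic/fleet-server` is used only because it is the one service account this module mentions elsewhere:

    # Evict every cached token of the service account (wildcard token name).
    resp = await client.security.clear_cached_service_tokens(
        namespace="elastic", service="fleet-server", name="*"
    )
    print(resp)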

``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The name of the service, which must be unique within its namespace. :param name: A comma-separated list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = { "namespace": _quote(namespace), "service": _quote(service), "name": _quote(name), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token/{__path_parts["name"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_service_tokens", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("expiration", "metadata", "name", "role_descriptors"), ) async def create_api_key( self, *, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an API key.

Create an API key for access without requiring basic authentication.

IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error.

A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.

The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation.
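
A minimal sketch of creating a key with restricted privileges, assuming the `client` setup from the change_password example; the key name, expiration, role descriptor, and metadata are illustrative:

    resp = await client.security.create_api_key(
        name="example-read-only-key",
        expiration="7d",  # omit to create a key that never expires
        role_descriptors={
            "read-logs": {"indices": [{"names": ["logs-*"], "privileges": ["read"]}]}
        },
        metadata={"purpose": "docs-example"},
    )
    # The response includes the generated id, the key itself, and a
    # base64-encoded "id:api_key" pair ready for an Authorization header.
    print(resp["id"], resp["encoded"])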

``_ :param expiration: The expiration time for the API key. By default, API keys never expire. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. :param name: A name for the API key. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param role_descriptors: An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata if name is not None: __body["name"] = name if role_descriptors is not None: __body["role_descriptors"] = role_descriptors __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.create_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("access", "name", "expiration", "metadata"), ) async def create_cross_cluster_api_key( self, *, access: t.Optional[t.Mapping[str, t.Any]] = None, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a cross-cluster API key.

Create an API key of the cross_cluster type for API key-based remote cluster access. A cross_cluster API key cannot be used to authenticate through the REST interface.

IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.

Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.

NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key's effective permission is exactly as specified with the access property.

A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.

Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
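
A sketch assuming the same `client` setup; the access specification below grants hypothetical cross-cluster search access to `logs-*` indices:

    resp = await client.security.create_cross_cluster_api_key(
        name="example-remote-search-key",
        access={"search": [{"names": ["logs-*"]}]},
        expiration="30d",
    )
    print(resp["encoded"])  # configure this value on the querying cluster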

``_ :param access: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. :param name: Specifies the name for this API key. :param expiration: Expiration time for the API key. By default, API keys never expire. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. """ if access is None and body is None: raise ValueError("Empty value passed for parameter 'access'") if name is None and body is None: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {} __path = "/_security/cross_cluster/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if access is not None: __body["access"] = access if name is not None: __body["name"] = name if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.create_cross_cluster_api_key", path_parts=__path_parts, ) @_rewrite_parameters() async def create_service_token( self, *, namespace: str, service: str, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a service account token.

Create a service account token for access without requiring basic authentication.

NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed.
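
A sketch with the assumed `client` setup; the token name is a placeholder:

    resp = await client.security.create_service_token(
        namespace="elastic", service="fleet-server", name="example-token"
    )
    # The token value is only returned at creation time, so capture it now.
    print(resp["token"]["value"])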

``_ :param namespace: The name of the namespace, which is a top-level grouping of service accounts. :param service: The name of the service. :param name: The name for the service account token. If omitted, a random name will be generated. Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `//`. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") __path_parts: t.Dict[str, str] if ( namespace not in SKIP_IN_PATH and service not in SKIP_IN_PATH and name not in SKIP_IN_PATH ): __path_parts = { "namespace": _quote(namespace), "service": _quote(service), "name": _quote(name), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token/{__path_parts["name"]}' __method = "PUT" elif namespace not in SKIP_IN_PATH and service not in SKIP_IN_PATH: __path_parts = {"namespace": _quote(namespace), "service": _quote(service)} __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token' __method = "POST" else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] __method, __path, params=__query, headers=__headers, endpoint_id="security.create_service_token", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("x509_certificate_chain",), ) async def delegate_pki( self, *, x509_certificate_chain: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delegate PKI authentication.

This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has delegation.enabled set to true. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the username_pattern of the respective realm.

This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.

IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
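
A hedged sketch with the assumed `client` setup; the chain entry is a truncated placeholder for a real base64-encoded DER certificate:

    resp = await client.security.delegate_pki(
        x509_certificate_chain=["MIIDeDCCAmCgAwIBAgIU..."]  # placeholder cert
    )
    print(resp["access_token"])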

``_ :param x509_certificate_chain: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. """ if x509_certificate_chain is None and body is None: raise ValueError( "Empty value passed for parameter 'x509_certificate_chain'" ) __path_parts: t.Dict[str, str] = {} __path = "/_security/delegate_pki" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if x509_certificate_chain is not None: __body["x509_certificate_chain"] = x509_certificate_chain __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.delegate_pki", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_privileges( self, *, application: str, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete application privileges.

To use this API, you must have one of the following privileges:

  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
``_ :param application: The name of the application. Application privileges are always associated with exactly one application. :param name: The name of the privilege. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if application in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'application'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = { "application": _quote(application), "name": _quote(name), } __path = ( f'/_security/privilege/{__path_parts["application"]}/{__path_parts["name"]}' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_privileges", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_role( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete roles.

Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files.
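
A one-call sketch with the assumed `client` setup; the role name is hypothetical:

    resp = await client.security.delete_role(name="example-read-logs-role")
    print(resp["found"])  # True if the role existed and was removed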

``_ :param name: The name of the role. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_role", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_role_mapping( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete role mappings.

Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files.

``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role_mapping/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_role_mapping", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_service_token( self, *, namespace: str, service: str, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete service account tokens.

Delete service account tokens for a service in a specified namespace.

``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The service name. :param name: The name of the service account token. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = { "namespace": _quote(namespace), "service": _quote(service), "name": _quote(name), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_service_token", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_user( self, *, username: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete users.

Delete users from the native realm.

``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_user", path_parts=__path_parts, ) @_rewrite_parameters() async def disable_user( self, *, username: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Disable users.

Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch.
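
A sketch with the assumed `client` setup and a hypothetical user:

    await client.security.disable_user(username="jacknich")
    # The companion enable_user API reverses this without recreating the user.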

``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}/_disable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.disable_user", path_parts=__path_parts, ) @_rewrite_parameters() async def disable_user_profile( self, *, uid: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Disable a user profile.

Disable user profiles so that they are not visible in user profile searches.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. To re-enable a disabled user profile, use the enable user profile API.

``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}/_disable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.disable_user_profile", path_parts=__path_parts, ) @_rewrite_parameters() async def enable_user( self, *, username: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enable users.

Enable users in the native realm. By default, when you create users, they are enabled.

``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}/_enable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.enable_user", path_parts=__path_parts, ) @_rewrite_parameters() async def enable_user_profile( self, *, uid: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enable a user profile.

Enable user profiles to make them visible in user profile searches.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.

``_ :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}/_enable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.enable_user_profile", path_parts=__path_parts, ) @_rewrite_parameters() async def enroll_kibana( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enroll Kibana.

Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.

NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.enroll_kibana", path_parts=__path_parts, ) @_rewrite_parameters() async def enroll_node( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enroll a node.

Enroll a new node to allow it to join an existing cluster with security features enabled.

The response contains all the necessary information for the joining node to bootstrap discovery and security-related settings so that it can successfully join the cluster. The response contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.enroll_node", path_parts=__path_parts, ) @_rewrite_parameters() async def get_api_key( self, *, active_only: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, id: t.Optional[str] = None, name: t.Optional[str] = None, owner: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm_name: t.Optional[str] = None, username: t.Optional[str] = None, with_limited_by: t.Optional[bool] = None, with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get API key information.

Retrieves information for one or more API keys. NOTE: If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. If you have read_security, manage_api_key or greater privileges (including manage_security), this API returns all API keys regardless of ownership.
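
A sketch with the assumed `client` setup, querying by key name prefix; the name pattern is illustrative:

    resp = await client.security.get_api_key(name="example-*", active_only=True)
    for key in resp["api_keys"]:
        print(key["id"], key["name"], key.get("expiration"))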

``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. :param id: An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. :param name: An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. :param owner: A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. :param realm_name: The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. :param username: The username of a user. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. :param with_limited_by: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. :param with_profile_uid: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if id is not None: __query["id"] = id if name is not None: __query["name"] = name if owner is not None: __query["owner"] = owner if pretty is not None: __query["pretty"] = pretty if realm_name is not None: __query["realm_name"] = realm_name if username is not None: __query["username"] = username if with_limited_by is not None: __query["with_limited_by"] = with_limited_by if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_api_key", path_parts=__path_parts, ) @_rewrite_parameters() async def get_builtin_privileges( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get builtin privileges.

Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_builtin_privileges", path_parts=__path_parts, ) @_rewrite_parameters() async def get_privileges( self, *, application: t.Optional[str] = None, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get application privileges.

To use this API, you must have one of the following privileges:

  • The read_security cluster privilege (or a greater privilege such as manage_security or all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. :param name: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. """ __path_parts: t.Dict[str, str] if application not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"application": _quote(application), "name": _quote(name)} __path = f'/_security/privilege/{__path_parts["application"]}/{__path_parts["name"]}' elif application not in SKIP_IN_PATH: __path_parts = {"application": _quote(application)} __path = f'/_security/privilege/{__path_parts["application"]}' else: __path_parts = {} __path = "/_security/privilege" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_privileges", path_parts=__path_parts, ) @_rewrite_parameters() async def get_role( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get roles.

Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files.

``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about all roles. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}' else: __path_parts = {} __path = "/_security/role" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_role", path_parts=__path_parts, ) @_rewrite_parameters() async def get_role_mapping( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get role mappings.

Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_security/role_mapping/{__path_parts["name"]}' else: __path_parts = {} __path = "/_security/role_mapping" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_role_mapping", path_parts=__path_parts, ) @_rewrite_parameters() async def get_service_accounts( self, *, namespace: t.Optional[str] = None, service: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get service accounts.

Get a list of service accounts that match the provided path parameters.

NOTE: Currently, only the elastic/fleet-server service account is available.

``_ :param namespace: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. :param service: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. """ __path_parts: t.Dict[str, str] if namespace not in SKIP_IN_PATH and service not in SKIP_IN_PATH: __path_parts = {"namespace": _quote(namespace), "service": _quote(service)} __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}' elif namespace not in SKIP_IN_PATH: __path_parts = {"namespace": _quote(namespace)} __path = f'/_security/service/{__path_parts["namespace"]}' else: __path_parts = {} __path = "/_security/service" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_service_accounts", path_parts=__path_parts, ) @_rewrite_parameters() async def get_service_credentials( self, *, namespace: str, service: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get service account credentials.

To use this API, you must have at least the read_security cluster privilege (or a greater privilege such as manage_service_account or manage_security).

The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.

NOTE: For tokens backed by the service_tokens file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.

``_ :param namespace: The name of the namespace. :param service: The service name. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") __path_parts: t.Dict[str, str] = { "namespace": _quote(namespace), "service": _quote(service), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_service_credentials", path_parts=__path_parts, ) @_rewrite_parameters() async def get_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get security index settings.

Get the user-configurable settings for the security internal index (.security and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown. This includes:

  • index.auto_expand_replicas
  • index.number_of_replicas
``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/settings" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "grant_type", "kerberos_ticket", "password", "refresh_token", "scope", "username", ), ) async def get_token( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, grant_type: t.Optional[ t.Union[ str, t.Literal[ "_kerberos", "client_credentials", "password", "refresh_token" ], ] ] = None, human: t.Optional[bool] = None, kerberos_ticket: t.Optional[str] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh_token: t.Optional[str] = None, scope: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a token.

Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the xpack.security.authc.token.enabled setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.

The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.

A successful get token API call returns a JSON structure that contains the access token, the amount of time (in seconds) until the token expires, the type, and the scope if available.

The tokens returned by the get token API are valid for a finite period of time; after that period, they can no longer be used. That time period is defined by the xpack.security.authc.token.timeout setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API.
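
A sketch of the password grant under the assumed `client` setup; the credentials are placeholders:

    resp = await client.security.get_token(
        grant_type="password", username="jacknich", password="new-longer-password"
    )
    print(resp["access_token"], resp["expires_in"])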

``_ :param grant_type: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. :param kerberos_ticket: The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. :param password: The user's password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. :param refresh_token: The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. :param scope: The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. :param username: The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if grant_type is not None: __body["grant_type"] = grant_type if kerberos_ticket is not None: __body["kerberos_ticket"] = kerberos_ticket if password is not None: __body["password"] = password if refresh_token is not None: __body["refresh_token"] = refresh_token if scope is not None: __body["scope"] = scope if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.get_token", path_parts=__path_parts, ) @_rewrite_parameters() async def get_user( self, *, username: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get users.

Get information about users in the native realm and built-in users.
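A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the username is a placeholder)::

    resp = await client.security.get_user(
        username="jacknich",  # placeholder; omit to retrieve all users
        with_profile_uid=True,
    )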

``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. :param with_profile_uid: Determines whether to retrieve the user profile UID, if it exists, for the users. """ __path_parts: t.Dict[str, str] if username not in SKIP_IN_PATH: __path_parts = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}' else: __path_parts = {} __path = "/_security/user" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_user", path_parts=__path_parts, ) @_rewrite_parameters() async def get_user_privileges( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get user privileges.

Get the security privileges for the logged-in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API.
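For example, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the response keys shown are illustrative)::

    resp = await client.security.get_user_privileges()
    print(resp["cluster"], resp["indices"])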

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/user/_privileges" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_user_privileges", path_parts=__path_parts, ) @_rewrite_parameters() async def get_user_profile( self, *, uid: t.Union[str, t.Sequence[str]], data: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a user profile.

Get a user's profile using the unique profile ID.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
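For completeness, a minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the profile UID and data key are placeholders)::

    resp = await client.security.get_user_profile(
        uid="u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0",  # placeholder UID
        data="app1.key1",  # return only this nested data content
    )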

``_ :param uid: A unique identifier for the user profile. :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}' __query: t.Dict[str, t.Any] = {} if data is not None: __query["data"] = data if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_user_profile", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "api_key", "grant_type", "access_token", "password", "run_as", "username", ), ignore_deprecated_options={"api_key"}, ) async def grant_api_key( self, *, api_key: t.Optional[t.Mapping[str, t.Any]] = None, grant_type: t.Optional[ t.Union[str, t.Literal["access_token", "password"]] ] = None, access_token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, run_as: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Grant an API key.

Create an API key on behalf of another user. This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are:

  • username and password
  • Elasticsearch access tokens
  • JWTs

The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user.

This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled.

A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.
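A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` authenticated as the granting application, inside an ``async`` function (the end-user credentials and key name are placeholders)::

    resp = await client.security.grant_api_key(
        grant_type="password",
        username="end_user",           # the user the key is created for
        password="end-user-password",  # that user's password (placeholder)
        api_key={"name": "granted-key", "expiration": "7d"},
    )
    print(resp["id"], resp["api_key"])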

``_ :param api_key: The API key. :param grant_type: The type of grant. Supported grant types are: `access_token`, `password`. :param access_token: The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. :param run_as: The name of the user to be impersonated. :param username: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. """ if api_key is None and body is None: raise ValueError("Empty value passed for parameter 'api_key'") if grant_type is None and body is None: raise ValueError("Empty value passed for parameter 'grant_type'") __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key/grant" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if api_key is not None: __body["api_key"] = api_key if grant_type is not None: __body["grant_type"] = grant_type if access_token is not None: __body["access_token"] = access_token if password is not None: __body["password"] = password if run_as is not None: __body["run_as"] = run_as if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.grant_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("application", "cluster", "index"), ) async def has_privileges( self, *, user: t.Optional[str] = None, application: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, cluster: t.Optional[ t.Sequence[ t.Union[ str, t.Literal[ "all", "cancel_task", "create_snapshot", "cross_cluster_replication", "cross_cluster_search", "delegate_pki", "grant_api_key", "manage", "manage_api_key", "manage_autoscaling", "manage_behavioral_analytics", "manage_ccr", "manage_data_frame_transforms", "manage_data_stream_global_retention", "manage_enrich", "manage_esql", "manage_ilm", "manage_index_templates", "manage_inference", "manage_ingest_pipelines", "manage_logstash_pipelines", "manage_ml", "manage_oidc", "manage_own_api_key", "manage_pipeline", "manage_rollup", "manage_saml", "manage_search_application", "manage_search_query_rules", "manage_search_synonyms", "manage_security", "manage_service_account", "manage_slm", "manage_token", "manage_transform", "manage_user_profile", "manage_watcher", "monitor", "monitor_data_frame_transforms", "monitor_data_stream_global_retention", "monitor_enrich", "monitor_esql", "monitor_inference", "monitor_ml", "monitor_rollup", "monitor_snapshot", "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", "none", "post_behavioral_analytics_event", "read_ccr", "read_fleet_secrets", 
"read_ilm", "read_pipeline", "read_security", "read_slm", "transport_client", "write_connector_secrets", "write_fleet_secrets", ], ] ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check user privileges.

Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature.
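A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the index and privilege names are placeholders)::

    resp = await client.security.has_privileges(
        cluster=["monitor"],
        index=[{"names": ["my-index"], "privileges": ["read"]}],
    )
    print(resp["has_all_requested"])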

``_ :param user: Username :param application: :param cluster: A list of the cluster privileges that you want to check. :param index: """ __path_parts: t.Dict[str, str] if user not in SKIP_IN_PATH: __path_parts = {"user": _quote(user)} __path = f'/_security/user/{__path_parts["user"]}/_has_privileges' else: __path_parts = {} __path = "/_security/user/_has_privileges" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if application is not None: __body["application"] = application if cluster is not None: __body["cluster"] = cluster if index is not None: __body["index"] = index __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.has_privileges", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("privileges", "uids"), ) async def has_privileges_user_profile( self, *, privileges: t.Optional[t.Mapping[str, t.Any]] = None, uids: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check user profile privileges.

Determine whether the users associated with the specified user profile IDs have all the requested privileges.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
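A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the profile UID is a placeholder)::

    resp = await client.security.has_privileges_user_profile(
        uids=["u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0"],
        privileges={"cluster": ["monitor"]},
    )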

``_ :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated users of the profiles. """ if privileges is None and body is None: raise ValueError("Empty value passed for parameter 'privileges'") if uids is None and body is None: raise ValueError("Empty value passed for parameter 'uids'") __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_has_privileges" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if privileges is not None: __body["privileges"] = privileges if uids is not None: __body["uids"] = uids __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.has_privileges_user_profile", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("id", "ids", "name", "owner", "realm_name", "username"), ) async def invalidate_api_key( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, id: t.Optional[str] = None, ids: t.Optional[t.Sequence[str]] = None, name: t.Optional[str] = None, owner: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm_name: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Invalidate API keys.

This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.

To use this API, you must have at least the manage_security, manage_api_key, or manage_own_api_key cluster privileges. The manage_security privilege allows deleting any API key, including both REST and cross cluster API keys. The manage_api_key privilege allows deleting any REST API key, but not cross cluster API keys. The manage_own_api_key privilege only allows deleting REST API keys that are owned by the user. In addition, with the manage_own_api_key privilege, an invalidation request must be issued in one of the following three formats:

  • Set the parameter owner=true.
  • Or, set both username and realm_name to match the user's identity.
  • Or, if the request is issued by an API key (that is to say, an API key invalidates itself), specify its ID in the ids field.
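A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the key ID is a placeholder)::

    resp = await client.security.invalidate_api_key(
        ids=["VuaCfGcBCdbkQm-e5aOx"],  # placeholder API key ID
    )
    print(resp["invalidated_api_keys"])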
``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. :param name: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. :param owner: Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. :param realm_name: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. :param username: The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if id is not None: __body["id"] = id if ids is not None: __body["ids"] = ids if name is not None: __body["name"] = name if owner is not None: __body["owner"] = owner if realm_name is not None: __body["realm_name"] = realm_name if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.invalidate_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("realm_name", "refresh_token", "token", "username"), ) async def invalidate_token( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm_name: t.Optional[str] = None, refresh_token: t.Optional[str] = None, token: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Invalidate a token.

The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the xpack.security.authc.token.timeout setting.

The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.

NOTE: While all parameters are optional, at least one of them is required. More specifically, either the token or the refresh_token parameter is required. If neither of these two is specified, then realm_name and/or username need to be specified.
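A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the token value is a placeholder)::

    resp = await client.security.invalidate_token(
        token="dGhpcyBpcyBub3QgYSByZWFsIHRva2Vu",  # placeholder access token
    )
    print(resp["invalidated_tokens"])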

``_ :param realm_name: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. :param refresh_token: A refresh token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. :param token: An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. :param username: The username of a user. This parameter cannot be used with either `refresh_token` or `token`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if realm_name is not None: __body["realm_name"] = realm_name if refresh_token is not None: __body["refresh_token"] = refresh_token if token is not None: __body["token"] = token if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.invalidate_token", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("nonce", "redirect_uri", "state", "realm"), ) async def oidc_authenticate( self, *, nonce: t.Optional[str] = None, redirect_uri: t.Optional[str] = None, state: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Authenticate OpenID Connect.

Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but they can also be used by custom web applications or other clients.
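A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function; the ``state`` and ``nonce`` values must match those from the corresponding prepare call, and all values here are placeholders::

    resp = await client.security.oidc_authenticate(
        redirect_uri="https://rp.example.org/cb?code=CODE&state=STATE",
        state="STATE",
        nonce="NONCE",
        realm="oidc1",
    )
    print(resp["access_token"], resp["refresh_token"])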

``_ :param nonce: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. :param redirect_uri: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. :param state: Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. :param realm: The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. """ if nonce is None and body is None: raise ValueError("Empty value passed for parameter 'nonce'") if redirect_uri is None and body is None: raise ValueError("Empty value passed for parameter 'redirect_uri'") if state is None and body is None: raise ValueError("Empty value passed for parameter 'state'") __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/authenticate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if nonce is not None: __body["nonce"] = nonce if redirect_uri is not None: __body["redirect_uri"] = redirect_uri if state is not None: __body["state"] = state if realm is not None: __body["realm"] = realm __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.oidc_authenticate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("token", "refresh_token"), ) async def oidc_logout( self, *, token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh_token: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Logout of OpenID Connect.

Invalidate an access token and a refresh token that were generated as a response to the /_security/oidc/authenticate API.

If the OpenID Connect authentication realm in Elasticsearch is configured accordingly, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but they can also be used by custom web applications or other clients.
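A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (both token values are placeholders)::

    resp = await client.security.oidc_logout(
        token="ACCESS-TOKEN-VALUE",
        refresh_token="REFRESH-TOKEN-VALUE",
    )
    print(resp["redirect"])  # end session endpoint URI, if configured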

``_ :param token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. """ if token is None and body is None: raise ValueError("Empty value passed for parameter 'token'") __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/logout" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if token is not None: __body["token"] = token if refresh_token is not None: __body["refresh_token"] = refresh_token __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.oidc_logout", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("iss", "login_hint", "nonce", "realm", "state"), ) async def oidc_prepare_authentication( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, iss: t.Optional[str] = None, login_hint: t.Optional[str] = None, nonce: t.Optional[str] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, state: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Prepare OpenID connect authentication.

Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.

The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the user's browser in order to continue the authentication process.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but they can also be used by custom web applications or other clients.
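A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the realm name is a placeholder)::

    resp = await client.security.oidc_prepare_authentication(realm="oidc1")
    print(resp["redirect"])  # URL of the OP's authorization endpoint
    # The response also echoes back the generated "state" and "nonce" values.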

``_ :param iss: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when *realm* is specified. One of *realm* or *iss* is required. :param login_hint: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. This parameter is not valid when *realm* is specified. :param nonce: The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. :param realm: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when *iss* is specified. One of *realm* or *iss* is required. :param state: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/prepare" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if iss is not None: __body["iss"] = iss if login_hint is not None: __body["login_hint"] = login_hint if nonce is not None: __body["nonce"] = nonce if realm is not None: __body["realm"] = realm if state is not None: __body["state"] = state __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.oidc_prepare_authentication", path_parts=__path_parts, ) @_rewrite_parameters( body_name="privileges", ) async def put_privileges( self, *, privileges: t.Optional[ t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, body: t.Optional[t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update application privileges.

To use this API, you must have one of the following privileges:

  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.

Application names are formed from a prefix and an optional suffix, which must conform to the following rules:

  • The prefix must begin with a lowercase ASCII letter.
  • The prefix must contain only ASCII letters or digits.
  • The prefix must be at least 3 characters long.
  • If the suffix exists, it must begin with either a dash - or _.
  • The suffix cannot contain any of the following characters: \\, /, *, ?, ", <, >, |, ,.
  • No part of the name can contain whitespace.

Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the underscore (_), dash (-), and dot (.) characters.

Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: /, *, :.
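A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the application, privilege, and action names are placeholders that follow the rules above)::

    resp = await client.security.put_privileges(
        privileges={
            "myapp": {
                "read": {"actions": ["data:read/*", "action:login"]},
            }
        }
    )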

``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if privileges is None and body is None: raise ValueError( "Empty value passed for parameters 'privileges' and 'body', one of them should be set." ) elif privileges is not None and body is not None: raise ValueError("Cannot set both 'privileges' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __body = privileges if privileges is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_privileges", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "applications", "cluster", "description", "global_", "indices", "metadata", "remote_cluster", "remote_indices", "run_as", "transient_metadata", ), parameter_aliases={"global": "global_"}, ) async def put_role( self, *, name: str, applications: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, cluster: t.Optional[ t.Sequence[ t.Union[ str, t.Literal[ "all", "cancel_task", "create_snapshot", "cross_cluster_replication", "cross_cluster_search", "delegate_pki", "grant_api_key", "manage", "manage_api_key", "manage_autoscaling", "manage_behavioral_analytics", "manage_ccr", "manage_data_frame_transforms", "manage_data_stream_global_retention", "manage_enrich", "manage_esql", "manage_ilm", "manage_index_templates", "manage_inference", "manage_ingest_pipelines", "manage_logstash_pipelines", "manage_ml", "manage_oidc", "manage_own_api_key", "manage_pipeline", "manage_rollup", "manage_saml", "manage_search_application", "manage_search_query_rules", "manage_search_synonyms", "manage_security", "manage_service_account", "manage_slm", "manage_token", "manage_transform", "manage_user_profile", "manage_watcher", "monitor", "monitor_data_frame_transforms", "monitor_data_stream_global_retention", "monitor_enrich", "monitor_esql", "monitor_inference", "monitor_ml", "monitor_rollup", "monitor_snapshot", "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", "none", "post_behavioral_analytics_event", "read_ccr", "read_fleet_secrets", "read_ilm", "read_pipeline", "read_security", "read_slm", "transport_client", "write_connector_secrets", "write_fleet_secrets", ], ] ] ] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, global_: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, remote_cluster: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: 
t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update roles.

The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless.
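A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the role, index, and privilege names are placeholders)::

    resp = await client.security.put_role(
        name="my_reader_role",
        cluster=["monitor"],
        indices=[{"names": ["my-index-*"], "privileges": ["read"]}],
    )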

``_ :param name: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. :param applications: A list of application privilege entries. :param cluster: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. :param description: Optional description of the role descriptor :param global_: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. :param indices: A list of indices permissions entries. :param metadata: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param remote_cluster: A list of remote cluster permissions entries. :param remote_indices: A list of remote indices permissions entries. NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. :param transient_metadata: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if applications is not None: __body["applications"] = applications if cluster is not None: __body["cluster"] = cluster if description is not None: __body["description"] = description if global_ is not None: __body["global"] = global_ if indices is not None: __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata if remote_cluster is not None: __body["remote_cluster"] = remote_cluster if remote_indices is not None: __body["remote_indices"] = remote_indices if run_as is not None: __body["run_as"] = run_as if transient_metadata is not None: __body["transient_metadata"] = transient_metadata __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "enabled", "metadata", "role_templates", "roles", "rules", "run_as", ), ) async def put_role_mapping( self, *, name: str, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, role_templates: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, roles: t.Optional[t.Sequence[str]] = None, rules: t.Optional[t.Mapping[str, t.Any]] = None, run_as: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update role mappings.

Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.

NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files.

Role templates

The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the cn=admin,dc=example,dc=com LDAP group should be given the superuser role in Elasticsearch. The roles field is used for this purpose.

For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The role_templates field is used for this purpose.

NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail.

All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.

By default, a template is evaluated to produce a single string that is the name of the role to be assigned to the user. If the format of the template is set to "json", then the template is expected to produce a JSON string or an array of JSON strings for the role names.
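A minimal sketch of the fixed-role case described above, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the mapping name is a placeholder)::

    resp = await client.security.put_role_mapping(
        name="admins",  # placeholder mapping name
        enabled=True,
        roles=["superuser"],
        rules={"field": {"groups": "cn=admin,dc=example,dc=com"}},
    )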

``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. :param enabled: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. :param metadata: Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param role_templates: A list of Mustache templates that will be evaluated to determine the roles names that should granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. :param roles: A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. :param rules: The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. :param run_as: """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role_mapping/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if enabled is not None: __body["enabled"] = enabled if metadata is not None: __body["metadata"] = metadata if role_templates is not None: __body["role_templates"] = role_templates if roles is not None: __body["roles"] = roles if rules is not None: __body["rules"] = rules if run_as is not None: __body["run_as"] = run_as __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_role_mapping", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "email", "enabled", "full_name", "metadata", "password", "password_hash", "roles", ), ) async def put_user( self, *, username: str, email: t.Optional[t.Union[None, str]] = None, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, full_name: t.Optional[t.Union[None, str]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, password: t.Optional[str] = None, password_hash: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, roles: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update users.

Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API.
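A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (all user details are placeholders)::

    resp = await client.security.put_user(
        username="jacknich",
        password="l0ng-r4nd0m-p@ssw0rd",
        roles=["admin", "other_role1"],
        full_name="Jack Nicholson",
        email="jacknich@example.com",
    )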

``_ :param username: An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. :param email: The email of the user. :param enabled: Specifies whether the user is enabled. :param full_name: The full name of the user. :param metadata: Arbitrary metadata that you want to associate with the user. :param password: The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password :param password_hash: A hash of the user's password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. :param refresh: Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. :param roles: A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`). """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if email is not None: __body["email"] = email if enabled is not None: __body["enabled"] = enabled if full_name is not None: __body["full_name"] = full_name if metadata is not None: __body["metadata"] = metadata if password is not None: __body["password"] = password if password_hash is not None: __body["password_hash"] = password_hash if roles is not None: __body["roles"] = roles __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_user", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "from_", "query", "search_after", "size", "sort", ), parameter_aliases={"from": "from_"}, ) async def query_api_keys( self, *, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, search_after: t.Optional[ 
t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, typed_keys: t.Optional[bool] = None, with_limited_by: t.Optional[bool] = None, with_profile_uid: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find API keys with a query.

Get a paginated list of API keys and their information. You can optionally filter the results with a query.

To use this API, you must have at least the manage_own_api_key or the read_security cluster privileges. If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. If you have the read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership. Refer to the linked documentation for examples of how to find API keys:
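A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the key name is a placeholder)::

    resp = await client.security.query_api_keys(
        query={"term": {"name": {"value": "my-key-name"}}},
        size=10,
        sort=[{"creation": {"order": "desc"}}],
    )
    for key in resp["api_keys"]:
        print(key["id"], key["name"])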

``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. :param aggs: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. :param search_after: The search after definition. :param size: The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param sort: The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. :param typed_keys: Determines whether aggregation names are prefixed by their respective types in the response. :param with_limited_by: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. :param with_profile_uid: Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. 
if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if typed_keys is not None: __query["typed_keys"] = typed_keys if with_limited_by is not None: __query["with_limited_by"] = with_limited_by if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.query_api_keys", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("from_", "query", "search_after", "size", "sort"), parameter_aliases={"from": "from_"}, ) async def query_role( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find roles with a query.

Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted.
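A minimal sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` inside an ``async`` function (the query values are placeholders)::

    resp = await client.security.query_role(
        query={"match": {"description": {"query": "user access"}}},
        sort=["name"],
    )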

``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. :param search_after: The search after definition. :param size: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param sort: The sort definition. You can sort on `username`, `roles`, or `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/role" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.query_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("from_", "query", "search_after", "size", "sort"), parameter_aliases={"from": "from_"}, ) async def query_user( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, with_profile_uid: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find users with a query.

Get information for users in a paginated manner. You can optionally filter the results with a query.

NOTE: As opposed to the get user API, built-in users are excluded from the result. This API is only for native users.
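
A similar sketch for users, under the same assumptions as the `query_role` example above:

    resp = await client.security.query_user(
        query={"term": {"enabled": True}},
        with_profile_uid=True,
        size=50,
    )
    print(resp["total"], "matching users")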

``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with users: `username`, `roles`, `enabled`, `full_name`, and `email`. :param search_after: The search after definition. :param size: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param sort: The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. :param with_profile_uid: Determines whether to retrieve the user profile UID, if it exists, for the users. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/user" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid if not __body: if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.query_user", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("content", "ids", "realm"), ) async def saml_authenticate( self, *, content: t.Optional[str] = None, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Authenticate SAML.

Submit a SAML response message to Elasticsearch for consumption.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The SAML message that is submitted can be:

  • A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
  • An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.

In either case, the SAML message needs to be a base64 encoded XML document with a root element of <Response>.

After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
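
A minimal sketch of the exchange (`client` is an assumed `AsyncElasticsearch` instance; `saml_response_b64` and `request_id` are placeholders produced by your web application and the prepare API):

    resp = await client.security.saml_authenticate(
        content=saml_response_b64,  # base64-encoded SAML <Response> from the browser
        ids=[request_id],           # id previously returned by saml_prepare_authentication
    )
    access_token = resp["access_token"]
    refresh_token = resp["refresh_token"]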

``_ :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. """ if content is None and body is None: raise ValueError("Empty value passed for parameter 'content'") if ids is None and body is None: raise ValueError("Empty value passed for parameter 'ids'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/authenticate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if content is not None: __body["content"] = content if ids is not None: __body["ids"] = ids if realm is not None: __body["realm"] = realm __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_authenticate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("ids", "realm", "content", "query_string"), ) async def saml_complete_logout( self, *, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, realm: t.Optional[str] = None, content: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query_string: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Logout of SAML completely.

Verifies the logout response sent from the SAML IdP.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them.
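
For example, verifying a logout response received via the HTTP-Redirect binding might look like this (all values are placeholders):

    await client.security.saml_complete_logout(
        realm="saml1",
        ids=[logout_request_id],
        query_string=redirect_query_string,  # raw query string from the IdP redirect
    )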

``_ :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. :param content: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. :param query_string: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. """ if ids is None and body is None: raise ValueError("Empty value passed for parameter 'ids'") if realm is None and body is None: raise ValueError("Empty value passed for parameter 'realm'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/complete_logout" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if ids is not None: __body["ids"] = ids if realm is not None: __body["realm"] = realm if content is not None: __body["content"] = content if query_string is not None: __body["query_string"] = query_string __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_complete_logout", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query_string", "acs", "realm"), ) async def saml_invalidate( self, *, query_string: t.Optional[str] = None, acs: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Invalidate SAML.

Submit a SAML LogoutRequest message to Elasticsearch for consumption.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the LogoutRequest. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP.
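
A sketch under the same assumptions as the examples above (placeholder values throughout):

    resp = await client.security.saml_invalidate(
        query_string=idp_query_string,  # must be the exact string received from the browser
        realm="saml1",
    )
    redirect_url = resp["redirect"]  # send the user's browser back to the IdP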

``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. :param acs: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the `acs` parameter. """ if query_string is None and body is None: raise ValueError("Empty value passed for parameter 'query_string'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/invalidate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query_string is not None: __body["query_string"] = query_string if acs is not None: __body["acs"] = acs if realm is not None: __body["realm"] = realm __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_invalidate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("token", "refresh_token"), ) async def saml_logout( self, *, token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh_token: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Logout of SAML.

Submits a request to invalidate an access token and refresh token.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
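
For example (the tokens are placeholders obtained from a prior `saml_authenticate` call):

    resp = await client.security.saml_logout(
        token=access_token,
        refresh_token=refresh_token,
    )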

``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. :param refresh_token: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. """ if token is None and body is None: raise ValueError("Empty value passed for parameter 'token'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/logout" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if token is not None: __body["token"] = token if refresh_token is not None: __body["refresh_token"] = refresh_token __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_logout", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("acs", "realm", "relay_state"), ) async def saml_prepare_authentication( self, *, acs: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, relay_state: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Prepare SAML authentication.

Create a SAML authentication request (<AuthnRequest>) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named SAMLRequest, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named SigAlg and Signature. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
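
A minimal sketch (the realm name is a placeholder):

    resp = await client.security.saml_prepare_authentication(realm="saml1")
    redirect_url = resp["redirect"]  # URL carrying the SAMLRequest parameter
    request_id = resp["id"]          # store this for the later saml_authenticate call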

``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. :param relay_state: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/prepare" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if acs is not None: __body["acs"] = acs if realm is not None: __body["realm"] = realm if relay_state is not None: __body["relay_state"] = relay_state __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_prepare_authentication", path_parts=__path_parts, ) @_rewrite_parameters() async def saml_service_provider_metadata( self, *, realm_name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create SAML service provider metadata.

Generate SAML metadata for a SAML 2.0 Service Provider.

The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
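
For example (the realm name is a placeholder):

    resp = await client.security.saml_service_provider_metadata(realm_name="saml1")
    print(resp["metadata"])  # the generated XML metadata document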

``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ if realm_name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'realm_name'") __path_parts: t.Dict[str, str] = {"realm_name": _quote(realm_name)} __path = f'/_security/saml/metadata/{__path_parts["realm_name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.saml_service_provider_metadata", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("data", "hint", "name", "size"), ) async def suggest_user_profiles( self, *, data: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, hint: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Suggest a user profile.

Get suggestions for user profiles that match specified search criteria.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
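
A minimal sketch (`client` and the hint UID below are illustrative placeholders):

    resp = await client.security.suggest_user_profiles(
        name="jack",
        hint={"uids": ["u_placeholder_profile_uid"]},
        size=5,
    )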

``_ :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=` to retrieve content nested under the specified ``. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field. :param hint: Extra search criteria to improve relevance of the suggestion result. Profiles matching the specified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. :param name: A query string used to match name-related fields in user profile documents. Name-related fields are the user's `username`, `full_name`, and `email`. :param size: The number of profiles to return. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_suggest" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if data is not None: __body["data"] = data if hint is not None: __body["hint"] = hint if name is not None: __body["name"] = name if size is not None: __body["size"] = size if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.suggest_user_profiles", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("expiration", "metadata", "role_descriptors"), ) async def update_api_key( self, *, id: str, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update an API key.

Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata.

To use this API, you must have at least the manage_own_api_key cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the run_as feature to submit a request on behalf of another user.

IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.

Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.

The access scope of an API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call.

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.
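
For illustration, narrowing an existing key to read-only access might look like this (the key ID and role descriptor are placeholders):

    await client.security.update_api_key(
        id="placeholder-api-key-id",
        role_descriptors={
            "read-only": {
                "indices": [{"names": ["logs-*"], "privileges": ["read"]}]
            }
        },
        metadata={"environment": "testing"},
    )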

``_ :param id: The ID of the API key to update. :param expiration: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key. :param role_descriptors: The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_security/api_key/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata if role_descriptors is not None: __body["role_descriptors"] = role_descriptors if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("access", "expiration", "metadata"), ) async def update_cross_cluster_api_key( self, *, id: str, access: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a cross-cluster API key.

Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.

To use this API, you must have at least the manage_security cluster privilege. Users can only update API keys that they created. To update another user's API key, use the run_as feature to submit a request on behalf of another user.

IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required.

It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.

This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the username and realm, is also updated automatically on every call.

NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.

To learn more about how to use this API, refer to the Update cross-cluster API key API examples page.
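
A minimal sketch, as shown below (the key ID and index pattern are placeholders):

    await client.security.update_cross_cluster_api_key(
        id="placeholder-cross-cluster-key-id",
        access={"search": [{"names": ["logs-*"]}]},
    )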

``_ :param id: The ID of the cross-cluster API key to update. :param access: The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. :param expiration: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if access is None and body is None: raise ValueError("Empty value passed for parameter 'access'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_security/cross_cluster/api_key/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if access is not None: __body["access"] = access if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_cross_cluster_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("security", "security_profile", "security_tokens"), parameter_aliases={ "security-profile": "security_profile", "security-tokens": "security_tokens", }, ) async def update_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, security: t.Optional[t.Mapping[str, t.Any]] = None, security_profile: t.Optional[t.Mapping[str, t.Any]] = None, security_tokens: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update security index settings.

Update the user-configurable settings for the security internal index (.security and associated indices). Only a subset of settings are allowed to be modified. This includes index.auto_expand_replicas and index.number_of_replicas.

NOTE: If index.auto_expand_replicas is set, index.number_of_replicas will be ignored during updates.

If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use.
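
For example, pinning the replica behavior of the main security index might look like this (the setting value is illustrative):

    await client.security.update_settings(
        security={"index.auto_expand_replicas": "0-all"},
    )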

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param security: Settings for the index used for most security configuration, including native realm users and roles configured with the API. :param security_profile: Settings for the index used to store profile information. :param security_tokens: Settings for the index used to store tokens. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if security is not None: __body["security"] = security if security_profile is not None: __body["security-profile"] = security_profile if security_tokens is not None: __body["security-tokens"] = security_tokens __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("data", "labels"), ) async def update_user_profile_data( self, *, uid: str, data: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, labels: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update user profile data.

Update specific data for the user profile that is associated with a unique ID.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

To use this API, you must have one of the following privileges:

  • The manage_user_profile cluster privilege.
  • The update_profile_data global privilege for the namespaces that are referenced in the request.

This API updates the labels and data fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.

For both labels and data, content is namespaced by the top-level fields. The update_profile_data global privilege grants privileges for updating only the allowed namespaces.
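
A minimal sketch (the profile UID and the namespace keys are placeholders):

    await client.security.update_user_profile_data(
        uid="u_placeholder_profile_uid",
        labels={"direction": "north"},
        data={"app1": {"theme": "dark"}},
    )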

``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param labels: Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}/_data' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if data is not None: __body["data"] = data if labels is not None: __body["labels"] = labels __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_user_profile_data", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/shutdown.py000066400000000000000000000317041506101734100254470ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class ShutdownClient(NamespacedClient): @_rewrite_parameters() async def delete_node( self, *, node_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch.

NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If the operator privileges feature is enabled, you must be an operator to use this API.
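
For example (the node ID is a placeholder):

    await client.shutdown.delete_node(node_id="node-1")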

``_ :param node_id: The node ID of the node to be removed from the shutdown state. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") __path_parts: t.Dict[str, str] = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/shutdown' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="shutdown.delete_node", path_parts=__path_parts, ) @_rewrite_parameters() async def get_node( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the shutdown status.

Get information about nodes that are ready to be shut down, have shut down preparations still in progress, or have stalled. The API returns status information for each part of the shut down process.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If the operator privileges feature is enabled, you must be an operator to use this API.

``_ :param node_id: The node for which to retrieve the shutdown status. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/shutdown' else: __path_parts = {} __path = "/_nodes/shutdown" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="shutdown.get_node", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("reason", "type", "allocation_delay", "target_node_name"), ) async def put_node( self, *, node_id: str, reason: t.Optional[str] = None, type: t.Optional[ t.Union[str, t.Literal["remove", "replace", "restart"]] ] = None, allocation_delay: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, target_node_name: t.Optional[str] = None, timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Prepare a node to be shut down.

NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If you specify a node that is offline, it will be prepared for shut down when it rejoins the cluster.

If the operator privileges feature is enabled, you must be an operator to use this API.

The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.

You must specify the type of shutdown: restart, remove, or replace. If a node is already being prepared for shutdown, you can use this API to change the shutdown type.

IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
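
A sketch of a restart-type shutdown followed by a status poll (the node ID and reason are placeholders):

    await client.shutdown.put_node(
        node_id="node-1",
        type="restart",
        reason="routine OS patching",
        allocation_delay="10m",  # only valid for restart-type shutdowns
    )
    status = await client.shutdown.get_node(node_id="node-1")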

``_ :param node_id: The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shut down while it is offline. No error is thrown if you specify an invalid node ID. :param reason: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shut down process. :param type: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. :param allocation_delay: Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param target_node_name: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
""" if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") if reason is None and body is None: raise ValueError("Empty value passed for parameter 'reason'") if type is None and body is None: raise ValueError("Empty value passed for parameter 'type'") __path_parts: t.Dict[str, str] = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/shutdown' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if reason is not None: __body["reason"] = reason if type is not None: __body["type"] = type if allocation_delay is not None: __body["allocation_delay"] = allocation_delay if target_node_name is not None: __body["target_node_name"] = target_node_name __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="shutdown.put_node", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/simulate.py000066400000000000000000000162641506101734100254230ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SimulateClient(NamespacedClient): @_rewrite_parameters( body_fields=( "docs", "component_template_substitutions", "index_template_substitutions", "mapping_addition", "pipeline_substitutions", ), ) @_stability_warning(Stability.EXPERIMENTAL) async def ingest( self, *, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[str] = None, component_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, pipeline: t.Optional[str] = None, pipeline_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.

This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.

The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.

This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.

By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
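
For illustration, testing a substitute pipeline against one sample document might look like this (the index, pipeline name, and processor are placeholders):

    resp = await client.simulate.ingest(
        index="my-index",
        docs=[{"_source": {"message": "hello"}}],
        pipeline_substitutions={
            "my-pipeline": {
                "processors": [{"set": {"field": "flagged", "value": True}}]
            }
        },
    )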

``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. :param component_template_substitutions: A map of component template names to substitute component template definition objects. :param index_template_substitutions: A map of index template names to substitute index template definition objects. :param mapping_addition: :param pipeline: The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. :param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. """ if docs is None and body is None: raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_ingest/{__path_parts["index"]}/_simulate' else: __path_parts = {} __path = "/_ingest/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if not __body: if docs is not None: __body["docs"] = docs if component_template_substitutions is not None: __body["component_template_substitutions"] = ( component_template_substitutions ) if index_template_substitutions is not None: __body["index_template_substitutions"] = index_template_substitutions if mapping_addition is not None: __body["mapping_addition"] = mapping_addition if pipeline_substitutions is not None: __body["pipeline_substitutions"] = pipeline_substitutions __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="simulate.ingest", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/slm.py000066400000000000000000000574471506101734100244030ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SlmClient(NamespacedClient): @_rewrite_parameters() async def delete_lifecycle( self, *, policy_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.

``_ :param policy_id: The id of the snapshot lifecycle policy to remove :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") __path_parts: t.Dict[str, str] = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="slm.delete_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def execute_lifecycle( self, *, policy_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.
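
For example, taking a snapshot ahead of planned maintenance (the policy ID is a placeholder):

    resp = await client.slm.execute_lifecycle(policy_id="daily-snapshots")
    print(resp["snapshot_name"])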

``_ :param policy_id: The id of the snapshot lifecycle policy to be executed :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") __path_parts: t.Dict[str, str] = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}/_execute' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="slm.execute_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def execute_retention( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule.
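
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``:

.. code-block:: python

    # Apply the retention rules of all policies immediately.
    await client.slm.execute_retention(master_timeout="30s", timeout="30s")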

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/_execute_retention" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="slm.execute_retention", path_parts=__path_parts, ) @_rewrite_parameters() async def get_lifecycle( self, *, policy_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
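
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` and a hypothetical policy id ``daily-snapshots``; the response is keyed by policy id:

.. code-block:: python

    # Omit policy_id to retrieve every policy.
    resp = await client.slm.get_lifecycle(policy_id="daily-snapshots")
    print(resp["daily-snapshots"]["policy"])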

``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if policy_id not in SKIP_IN_PATH: __path_parts = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}' else: __path_parts = {} __path = "/_slm/policy" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="slm.get_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def get_stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management.
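
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the response fields shown are illustrative of the global counters this API reports:

.. code-block:: python

    resp = await client.slm.get_stats()
    print(resp["total_snapshots_taken"], resp["total_snapshots_deleted"])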

``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="slm.get_stats", path_parts=__path_parts, ) @_rewrite_parameters() async def get_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the snapshot lifecycle management status.
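
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``:

.. code-block:: python

    resp = await client.slm.get_status()
    print(resp["operation_mode"])  # "RUNNING", "STOPPING", or "STOPPED"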

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="slm.get_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("config", "name", "repository", "retention", "schedule"), ) async def put_lifecycle( self, *, policy_id: str, config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, repository: t.Optional[str] = None, retention: t.Optional[t.Mapping[str, t.Any]] = None, schedule: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored.
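
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``, an already-registered repository ``my_repository``, and illustrative schedule and retention values:

.. code-block:: python

    resp = await client.slm.put_lifecycle(
        policy_id="daily-snapshots",
        schedule="0 30 1 * * ?",  # cron: 01:30 every day
        name="<daily-snap-{now/d}>",  # date math; a UUID suffix is appended
        repository="my_repository",
        config={"indices": ["data-*"], "ignore_unavailable": False},
        retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
    )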

``_ :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param name: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. :param repository: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. :param retention: Retention rules used to retain and delete snapshots created by the policy. :param schedule: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") __path_parts: t.Dict[str, str] = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if config is not None: __body["config"] = config if name is not None: __body["name"] = name if repository is not None: __body["repository"] = repository if retention is not None: __body["retention"] = retention if schedule is not None: __body["schedule"] = schedule if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="slm.put_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() async def start( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API.
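
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``:

.. code-block:: python

    # Only needed if SLM was previously stopped with the stop SLM API.
    await client.slm.start()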

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/start" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="slm.start", path_parts=__path_parts, ) @_rewrite_parameters() async def stop( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.

The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running.
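
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``:

.. code-block:: python

    await client.slm.stop()
    # The stop request is only acknowledged; poll the status API until
    # it reports "STOPPED" before relying on SLM being fully halted.
    resp = await client.slm.get_status()
    print(resp["operation_mode"])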

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stop" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="slm.stop", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/snapshot.py000066400000000000000000002330031506101734100254270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SnapshotClient(NamespacedClient): @_rewrite_parameters() async def cleanup_repository( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
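
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` and a registered repository ``my_repository``:

.. code-block:: python

    resp = await client.snapshot.cleanup_repository(name="my_repository")
    print(resp["results"])  # bytes and blobs removed by the cleanup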

``_ :param name: The name of the snapshot repository to clean up. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_cleanup' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.cleanup_repository", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("indices",), ) async def clone( self, *, repository: str, snapshot: str, target_snapshot: str, indices: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository.
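
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` plus hypothetical repository and snapshot names:

.. code-block:: python

    await client.snapshot.clone(
        repository="my_repository",
        snapshot="snapshot-1",
        target_snapshot="snapshot-1-clone",
        indices="data-*",  # multi-target syntax is supported
    )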

``_ :param repository: The name of the snapshot repository that both source and target snapshot belong to. :param snapshot: The source snapshot name. :param target_snapshot: The target snapshot name. :param indices: A comma-separated list of indices to include in the snapshot. Multi-target syntax is supported. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") if target_snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target_snapshot'") if indices is None and body is None: raise ValueError("Empty value passed for parameter 'indices'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), "target_snapshot": _quote(target_snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_clone/{__path_parts["target_snapshot"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if indices is not None: __body["indices"] = indices __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.clone", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "expand_wildcards", "feature_states", "ignore_unavailable", "include_global_state", "indices", "metadata", "partial", ), ) async def create( self, *, repository: str, snapshot: str, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, feature_states: t.Optional[t.Sequence[str]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_global_state: t.Optional[bool] = None, indices: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, partial: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a snapshot. Take a snapshot of a cluster or of data streams and indices.
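
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` and a registered repository ``my_repository``:

.. code-block:: python

    resp = await client.snapshot.create(
        repository="my_repository",
        snapshot="snapshot-1",
        indices="data-*",
        include_global_state=False,
        wait_for_completion=True,  # block until the snapshot finishes
    )
    print(resp["snapshot"]["state"])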

``_ :param repository: The name of the repository for the snapshot. :param snapshot: The name of the snapshot. It supports date math. It must be unique in the repository. :param expand_wildcards: Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`. :param feature_states: The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). :param ignore_unavailable: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. :param include_global_state: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). :param indices: A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param metadata: Arbitrary metadata to attach to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. :param partial: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. :param wait_for_completion: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes.
""" if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if expand_wildcards is not None: __body["expand_wildcards"] = expand_wildcards if feature_states is not None: __body["feature_states"] = feature_states if ignore_unavailable is not None: __body["ignore_unavailable"] = ignore_unavailable if include_global_state is not None: __body["include_global_state"] = include_global_state if indices is not None: __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata if partial is not None: __body["partial"] = partial if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.create", path_parts=__path_parts, ) @_rewrite_parameters( body_name="repository", ) async def create_repository( self, *, name: str, repository: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writeable. Ensure there are no cluster blocks (for example, cluster.blocks.read_only and cluster.blocks.read_only_allow_delete settings) that prevent write access.

Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used.
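
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the shared-filesystem (``fs``) repository type and its ``location`` setting are used purely as an illustration:

.. code-block:: python

    await client.snapshot.create_repository(
        name="my_repository",
        repository={"type": "fs", "settings": {"location": "backups"}},
        verify=True,  # check the repository on all master and data nodes
    )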

``_ :param name: The name of the snapshot repository to register or update. :param repository: :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. :param verify: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if repository is None and body is None: raise ValueError( "Empty value passed for parameters 'repository' and 'body', one of them should be set." ) elif repository is not None and body is not None: raise ValueError("Cannot set both 'repository' and 'body'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if verify is not None: __query["verify"] = verify __body = repository if repository is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.create_repository", path_parts=__path_parts, ) @_rewrite_parameters() async def delete( self, *, repository: str, snapshot: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete snapshots.
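
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` plus hypothetical repository and snapshot names:

.. code-block:: python

    await client.snapshot.delete(
        repository="my_repository",
        snapshot="snapshot-1,snapshot-2",  # wildcards such as "snap-*" also work
        wait_for_completion=True,
    )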

``_ :param repository: The name of the repository to delete a snapshot from. :param snapshot: A comma-separated list of snapshot names to delete. It also accepts wildcards (`*`). :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param wait_for_completion: If `true`, the request returns a response when the matching snapshots are all deleted. If `false`, the request returns a response as soon as the deletes are scheduled. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="snapshot.delete", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_repository( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place.
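
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``:

.. code-block:: python

    # Unregisters the repository; the snapshots on disk are left untouched.
    await client.snapshot.delete_repository(name="my_repository")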

``_ :param name: The name of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="snapshot.delete_repository", path_parts=__path_parts, ) @_rewrite_parameters() async def get( self, *, repository: str, snapshot: t.Union[str, t.Sequence[str]], after: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_sort_value: t.Optional[str] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_repository: t.Optional[bool] = None, index_details: t.Optional[bool] = None, index_names: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, offset: t.Optional[int] = None, order: t.Optional[t.Union[str, t.Literal["asc", "desc"]]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, slm_policy_filter: t.Optional[str] = None, sort: t.Optional[ t.Union[ str, t.Literal[ "duration", "failed_shard_count", "index_count", "name", "repository", "shard_count", "start_time", ], ] ] = None, state: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "FAILED", "INCOMPATIBLE", "IN_PROGRESS", "PARTIAL", "SUCCESS", ], ] ], t.Union[ str, t.Literal[ "FAILED", "INCOMPATIBLE", "IN_PROGRESS", "PARTIAL", "SUCCESS" ], ], ] ] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get snapshot information.

NOTE: The after parameter and next field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration.
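
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` and a registered repository ``my_repository``:

.. code-block:: python

    resp = await client.snapshot.get(
        repository="my_repository",
        snapshot="*",
        sort="start_time",
        order="desc",
        size=10,
    )
    for snap in resp["snapshots"]:
        print(snap["snapshot"], snap["state"])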

``_ :param repository: A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported. :param snapshot: A comma-separated list of snapshot names to retrieve. Wildcards (`*`) are supported. * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * To get information about any snapshots that are currently running, use `_current`. :param after: An offset identifier to start pagination from as returned by the next field in the response body. :param from_sort_value: The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count. :param ignore_unavailable: If `false`, the request returns an error for any snapshots that are unavailable. :param include_repository: If `true`, the response includes the repository name in each snapshot. :param index_details: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. :param index_names: If `true`, the response includes the name of each index in each snapshot. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param offset: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. :param order: The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. :param size: The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. :param slm_policy_filter: Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. :param sort: The sort order for the result. The default behavior is sorting by snapshot start time stamp. :param state: Only return snapshots with a state found in the given comma-separated list of snapshot states. The default is all snapshot states. :param verbose: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined.
""" if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}' __query: t.Dict[str, t.Any] = {} if after is not None: __query["after"] = after if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_sort_value is not None: __query["from_sort_value"] = from_sort_value if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_repository is not None: __query["include_repository"] = include_repository if index_details is not None: __query["index_details"] = index_details if index_names is not None: __query["index_names"] = index_names if master_timeout is not None: __query["master_timeout"] = master_timeout if offset is not None: __query["offset"] = offset if order is not None: __query["order"] = order if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if slm_policy_filter is not None: __query["slm_policy_filter"] = slm_policy_filter if sort is not None: __query["sort"] = sort if state is not None: __query["state"] = state if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="snapshot.get", path_parts=__path_parts, ) @_rewrite_parameters() async def get_repository( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get snapshot repository information.
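
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client``; the response is keyed by repository name:

.. code-block:: python

    # Omit ``name`` (or pass "_all") to list every registered repository.
    resp = await client.snapshot.get_repository(name="my_repository")
    print(resp["my_repository"]["type"])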

``_ :param name: A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. :param local: If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}' else: __path_parts = {} __path = "/_snapshot" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="snapshot.get_repository", path_parts=__path_parts, ) @_rewrite_parameters() async def repository_analyze( self, *, name: str, blob_count: t.Optional[int] = None, concurrency: t.Optional[int] = None, detailed: t.Optional[bool] = None, early_read_node_count: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_blob_size: t.Optional[t.Union[int, str]] = None, max_total_data_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, rare_action_probability: t.Optional[float] = None, rarely_abort_writes: t.Optional[bool] = None, read_node_count: t.Optional[int] = None, register_operation_count: t.Optional[int] = None, seed: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Analyze a snapshot repository.

Performs operations on a snapshot repository in order to check for incorrect behaviour.

There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.

The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads. If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a blob_count of at least 2000, a max_blob_size of at least 2gb, a max_total_data_size of at least 1tb, and a register_operation_count of at least 100. Always specify a generous timeout, possibly 1h or longer, to allow time for each analysis to run to completion. Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.

If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system. Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures. However your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis. You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using. For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis. This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch. Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.

If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it.

If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it.

If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following:

  • Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.
  • Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.
  • Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results.

IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings max_snapshot_bytes_per_sec and max_restore_bytes_per_sec if available and the cluster setting indices.recovery.max_bytes_per_sec which you can use to limit the bandwidth they consume.

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. The response exposes implementation details of the analysis which may change from version to version.

NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.

NOTE: This API may not work correctly in a mixed-version cluster.

Implementation details

NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.

The analysis comprises a number of blob-level tasks, as set by the blob_count parameter and a number of compare-and-exchange operations on linearizable registers, as set by the register_operation_count parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution.

For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the max_blob_size and max_total_data_size parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.

For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.

For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.

The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data.

For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.

Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes.
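
A minimal usage sketch, assuming an ``AsyncElasticsearch`` instance named ``client`` and a registered repository ``my_repository``; the deliberately small values follow the advice above to start with a cheap first run:

.. code-block:: python

    resp = await client.snapshot.repository_analyze(
        name="my_repository",
        blob_count=100,  # scale towards 2000+ in later, larger runs
        max_blob_size="10mb",
        timeout="120s",  # be generous; analyses can take a long time
    )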

``_ :param name: The name of the repository. :param blob_count: The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`. :param concurrency: The number of operations to run concurrently during the test. :param detailed: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis. :param early_read_node_count: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. :param max_blob_size: The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. :param max_total_data_size: An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`. :param rare_action_probability: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. :param rarely_abort_writes: Indicates whether to rarely cancel writes before they complete. :param read_node_count: The number of nodes on which to read a blob after writing. :param register_operation_count: The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. :param seed: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. :param timeout: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_analyze' __query: t.Dict[str, t.Any] = {} if blob_count is not None: __query["blob_count"] = blob_count if concurrency is not None: __query["concurrency"] = concurrency if detailed is not None: __query["detailed"] = detailed if early_read_node_count is not None: __query["early_read_node_count"] = early_read_node_count if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if max_blob_size is not None: __query["max_blob_size"] = max_blob_size if max_total_data_size is not None: __query["max_total_data_size"] = max_total_data_size if pretty is not None: __query["pretty"] = pretty if rare_action_probability is not None: __query["rare_action_probability"] = rare_action_probability if rarely_abort_writes is not None: __query["rarely_abort_writes"] = rarely_abort_writes if read_node_count is not None: __query["read_node_count"] = read_node_count if register_operation_count is not None: __query["register_operation_count"] = register_operation_count if seed is not None: __query["seed"] = seed if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.repository_analyze", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def repository_verify_integrity( self, *, name: t.Union[str, t.Sequence[str]], blob_thread_pool_concurrency: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_snapshot_verification_concurrency: t.Optional[int] = None, index_verification_concurrency: t.Optional[int] = None, max_bytes_per_sec: t.Optional[str] = None, max_failed_shard_snapshots: t.Optional[int] = None, meta_thread_pool_concurrency: t.Optional[int] = None, pretty: t.Optional[bool] = None, snapshot_verification_concurrency: t.Optional[int] = None, verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Verify the repository integrity. Verify the integrity of the contents of a snapshot repository.

This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.

If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its read_only option to true, and use this API to verify its integrity. Until you do so:

  • It may not be possible to restore some snapshots from this repository.
  • Searchable snapshots may report errors when searched or may have unassigned shards.
  • Taking snapshots into this repository may fail, or may appear to succeed despite creating a snapshot which cannot be restored.
  • Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.
  • Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents.

If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again.

If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository.

Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running, Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.

NOTE: This API may not work correctly in a mixed-version cluster.

The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. For instance, by default it will only use at most half of the snapshot_meta threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.

The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions.
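
As a hedged sketch of a conservative invocation (the client `es` and repository name are illustrative assumptions; see the parameter reference below):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.snapshot.repository_verify_integrity(
        name="my_repository",        # hypothetical repository name
        verify_blob_contents=False,  # True would checksum every blob: slow and expensive
    )
    print(resp)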

``_ :param name: The name of the snapshot repository. :param blob_thread_pool_concurrency: If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. :param index_snapshot_verification_concurrency: The maximum number of index snapshots to verify concurrently within each index verification. :param index_verification_concurrency: The number of indices to verify concurrently. The default behavior is to use the entire `snapshot_meta` thread pool. :param max_bytes_per_sec: If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. :param max_failed_shard_snapshots: The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. If your repository contains more than this number of shard snapshot failures, the verification will fail. :param meta_thread_pool_concurrency: The maximum number of snapshot metadata operations to run concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. :param snapshot_verification_concurrency: The number of snapshots to verify concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. :param verify_blob_contents: Indicates whether to verify the checksum of every data blob in the repository. If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_verify_integrity' __query: t.Dict[str, t.Any] = {} if blob_thread_pool_concurrency is not None: __query["blob_thread_pool_concurrency"] = blob_thread_pool_concurrency if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if index_snapshot_verification_concurrency is not None: __query["index_snapshot_verification_concurrency"] = ( index_snapshot_verification_concurrency ) if index_verification_concurrency is not None: __query["index_verification_concurrency"] = index_verification_concurrency if max_bytes_per_sec is not None: __query["max_bytes_per_sec"] = max_bytes_per_sec if max_failed_shard_snapshots is not None: __query["max_failed_shard_snapshots"] = max_failed_shard_snapshots if meta_thread_pool_concurrency is not None: __query["meta_thread_pool_concurrency"] = meta_thread_pool_concurrency if pretty is not None: __query["pretty"] = pretty if snapshot_verification_concurrency is not None: __query["snapshot_verification_concurrency"] = ( snapshot_verification_concurrency ) if verify_blob_contents is not None: __query["verify_blob_contents"] = verify_blob_contents __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.repository_verify_integrity", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "feature_states", "ignore_index_settings", "ignore_unavailable", "include_aliases", "include_global_state", "index_settings", "indices", "partial", "rename_pattern", "rename_replacement", ), ) async def restore( self, *, repository: str, snapshot: str, error_trace: t.Optional[bool] = None, feature_states: t.Optional[t.Sequence[str]] = 
None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_index_settings: t.Optional[t.Sequence[str]] = None, ignore_unavailable: t.Optional[bool] = None, include_aliases: t.Optional[bool] = None, include_global_state: t.Optional[bool] = None, index_settings: t.Optional[t.Mapping[str, t.Any]] = None, indices: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, partial: t.Optional[bool] = None, pretty: t.Optional[bool] = None, rename_pattern: t.Optional[str] = None, rename_replacement: t.Optional[str] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.

You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible.

To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.

Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:

GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
          

If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.

If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.
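
A hedged sketch of a restore that renames incoming indices to avoid clashing with live ones (the client `es`, repository, and snapshot names are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.snapshot.restore(
        repository="my_repository",       # hypothetical repository name
        snapshot="snapshot_2",            # hypothetical snapshot name
        indices="index_1,index_2",
        rename_pattern="index_(.+)",      # capture group used in the replacement
        rename_replacement="restored_index_$1",
        include_global_state=False,
        wait_for_completion=True,         # block until recovery attempts complete
    )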

``_ :param repository: The name of the repository to restore a snapshot from. :param snapshot: The name of the snapshot to restore. :param feature_states: The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). :param ignore_index_settings: The index settings to not restore from the snapshot. You can't use this option to ignore `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. :param ignore_unavailable: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. :param include_aliases: If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. :param include_global_state: If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates * Legacy index templates * Ingest pipelines * Index lifecycle management (ILM) policies * Stored scripts * For snapshots taken after 7.12.0, feature states If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. :param index_settings: Index settings to add or change in restored indices, including backing indices. You can't use this option to change `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. :param indices: A comma-separated list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. You can't use this parameter to restore system indices or system data streams. Use `feature_states` instead. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param partial: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. :param rename_pattern: A rename pattern to apply to restored data streams and indices. 
Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. :param rename_replacement: The rename replacement string that is used with the `rename_pattern`. :param wait_for_completion: If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_restore' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if feature_states is not None: __body["feature_states"] = feature_states if ignore_index_settings is not None: __body["ignore_index_settings"] = ignore_index_settings if ignore_unavailable is not None: __body["ignore_unavailable"] = ignore_unavailable if include_aliases is not None: __body["include_aliases"] = include_aliases if include_global_state is not None: __body["include_global_state"] = include_global_state if index_settings is not None: __body["index_settings"] = index_settings if indices is not None: __body["indices"] = indices if partial is not None: __body["partial"] = partial if rename_pattern is not None: __body["rename_pattern"] = rename_pattern if rename_replacement is not None: __body["rename_replacement"] = rename_replacement if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.restore", path_parts=__path_parts, ) @_rewrite_parameters() async def status( self, *, repository: t.Optional[str] = None, snapshot: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot.

Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.

If you omit the <snapshot> request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify <repository> and <snapshot> to retrieve information for specific snapshots, even if they're not currently running.

WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).

Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.
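
A hedged sketch of both usages (the client `es` and all names are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    # Preferred: report only currently running snapshots (cheap).
    resp = await es.snapshot.status()
    # Targeting specific snapshots may trigger many repository reads (expensive).
    resp = await es.snapshot.status(
        repository="my_repository",   # hypothetical repository name
        snapshot="snapshot_2",        # hypothetical snapshot name
        ignore_unavailable=True,
    )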

``_ :param repository: The snapshot repository name used to limit the request. It supports wildcards (`*`) if `` isn't specified. :param snapshot: A comma-separated list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported. :param ignore_unavailable: If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] if repository not in SKIP_IN_PATH and snapshot not in SKIP_IN_PATH: __path_parts = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_status' elif repository not in SKIP_IN_PATH: __path_parts = {"repository": _quote(repository)} __path = f'/_snapshot/{__path_parts["repository"]}/_status' else: __path_parts = {} __path = "/_snapshot/_status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="snapshot.status", path_parts=__path_parts, ) @_rewrite_parameters() async def verify_repository( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Verify a snapshot repository. Check for common misconfigurations in a snapshot repository.
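
A hedged one-call sketch (the client `es` and repository name are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.snapshot.verify_repository(name="my_repository", timeout="30s")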

``_ :param name: The name of the snapshot repository to verify. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_verify' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.verify_repository", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/sql.py000066400000000000000000000472141506101734100243760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SqlClient(NamespacedClient): @_rewrite_parameters( body_fields=("cursor",), ) async def clear_cursor( self, *, cursor: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear an SQL search cursor.
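
A hedged sketch of the typical pairing with a paginated SQL search (the client `es` and index name are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    page = await es.sql.query(query="SELECT * FROM my_index", fetch_size=5)
    cursor = page.body.get("cursor")    # present when more pages are available
    if cursor:
        await es.sql.clear_cursor(cursor=cursor)  # release server-side resources early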

``_ :param cursor: Cursor to clear. """ if cursor is None and body is None: raise ValueError("Empty value passed for parameter 'cursor'") __path_parts: t.Dict[str, str] = {} __path = "/_sql/close" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if cursor is not None: __body["cursor"] = cursor __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="sql.clear_cursor", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_async( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.

If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:

  • Users with the cancel_task cluster privilege.
  • The user who first submitted the search.
``_ :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_sql/async/delete/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="sql.delete_async", path_parts=__path_parts, ) @_rewrite_parameters() async def get_async( self, *, id: str, delimiter: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search.

If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.
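
A hedged sketch of submitting a search that may become async and fetching its results later (the client `es`, query, and timeouts are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    submitted = await es.sql.query(
        query="SELECT * FROM my_index",      # hypothetical query
        wait_for_completion_timeout="2s",    # becomes async if slower than this
        keep_on_completion=True,
    )
    search_id = submitted.body.get("id")     # set when the search went async
    if search_id:
        resp = await es.sql.get_async(id=search_id, wait_for_completion_timeout="30s")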

``_ :param id: The identifier for the search. :param delimiter: The separator for CSV results. The API supports this parameter only for CSV responses. :param format: The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. :param keep_alive: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. :param wait_for_completion_timeout: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_sql/async/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if delimiter is not None: __query["delimiter"] = delimiter if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="sql.get_async", path_parts=__path_parts, ) @_rewrite_parameters() async def get_async_status( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search.

``_ :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_sql/async/status/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="sql.get_async_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_partial_search_results", "catalog", "columnar", "cursor", "fetch_size", "field_multi_value_leniency", "filter", "index_using_frozen", "keep_alive", "keep_on_completion", "page_timeout", "params", "query", "request_timeout", "runtime_mappings", "time_zone", "wait_for_completion_timeout", ), ignore_deprecated_options={"params", "request_timeout"}, ) async def query( self, *, allow_partial_search_results: t.Optional[bool] = None, catalog: t.Optional[str] = None, columnar: t.Optional[bool] = None, cursor: t.Optional[str] = None, error_trace: t.Optional[bool] = None, fetch_size: t.Optional[int] = None, field_multi_value_leniency: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal["cbor", "csv", "json", "smile", "tsv", "txt", "yaml"] ] ] = None, human: t.Optional[bool] = None, index_using_frozen: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, params: t.Optional[t.Sequence[t.Any]] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, time_zone: t.Optional[str] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get SQL search results. Run an SQL request.
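
A hedged minimal sketch (the client `es`, index, and query text are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.sql.query(
        query="SELECT author, COUNT(*) FROM my_index GROUP BY author",  # hypothetical
        fetch_size=10,
        format="json",
    )
    for row in resp.body.get("rows", []):
        print(row)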

``_ :param allow_partial_search_results: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. :param catalog: The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. :param columnar: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. :param cursor: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. :param fetch_size: The maximum number of rows (or entries) to return in one response. :param field_multi_value_leniency: If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. :param filter: The Elasticsearch query DSL for additional filtering. :param format: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. :param index_using_frozen: If `true`, the search can run on frozen indices. :param keep_alive: The retention period for an async or saved synchronous search. :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. :param page_timeout: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. :param params: The values for parameters in the query. :param query: The SQL query to run. :param request_timeout: The timeout before the request fails. :param runtime_mappings: One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. :param time_zone: The ISO-8601 time zone ID for the search. :param wait_for_completion_timeout: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_sql" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_partial_search_results is not None: __body["allow_partial_search_results"] = allow_partial_search_results if catalog is not None: __body["catalog"] = catalog if columnar is not None: __body["columnar"] = columnar if cursor is not None: __body["cursor"] = cursor if fetch_size is not None: __body["fetch_size"] = fetch_size if field_multi_value_leniency is not None: __body["field_multi_value_leniency"] = field_multi_value_leniency if filter is not None: __body["filter"] = filter if index_using_frozen is not None: __body["index_using_frozen"] = index_using_frozen if keep_alive is not None: __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion if page_timeout is not None: __body["page_timeout"] = page_timeout if params is not None: __body["params"] = params if query is not None: __body["query"] = query if request_timeout is not None: __body["request_timeout"] = request_timeout if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if time_zone is not None: __body["time_zone"] = time_zone if wait_for_completion_timeout is not None: __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="sql.query", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query", "fetch_size", "filter", "time_zone"), ) async def translate( self, *, query: t.Optional[str] = None, error_trace: t.Optional[bool] = None, fetch_size: t.Optional[int] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, time_zone: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding cursor.
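
A hedged sketch showing how a SQL statement maps to Query DSL (the client `es` and query text are illustrative assumptions):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    dsl = await es.sql.translate(
        query="SELECT * FROM my_index WHERE release_date < '2000-01-01'",  # hypothetical
        fetch_size=10,
    )
    print(dsl)  # a search API request body expressed in Query DSL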

``_ :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. :param filter: The Elasticsearch query DSL for additional filtering. :param time_zone: The ISO-8601 time zone ID for the search. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {} __path = "/_sql/translate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = query if fetch_size is not None: __body["fetch_size"] = fetch_size if filter is not None: __body["filter"] = filter if time_zone is not None: __body["time_zone"] = time_zone __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="sql.translate", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/ssl.py000066400000000000000000000073001506101734100243700ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class SslClient(NamespacedClient): @_rewrite_parameters() async def certificates( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get SSL certificates.

Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including:

  • Settings for transport and HTTP interfaces
  • TLS settings that are used within authentication realms
  • TLS settings for remote monitoring exporters

The list includes certificates that are used for configuring trust, such as those configured in the xpack.security.transport.ssl.truststore and xpack.security.transport.ssl.certificate_authorities settings. It also includes certificates that are used for configuring server identity, such as xpack.security.http.ssl.keystore and xpack.security.http.ssl.certificate settings.

The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.

NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.

If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.
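
A hedged sketch (the client `es` is an illustrative assumption; the response body is a list with one entry per certificate):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    certs = await es.ssl.certificates()
    for cert in certs.body:
        print(cert.get("path"), cert.get("expiry"))  # e.g. keystore path and expiry date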

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ssl.certificates", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/synonyms.py000066400000000000000000000422201506101734100254660ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SynonymsClient(NamespacedClient): @_rewrite_parameters() async def delete_synonym( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a synonym set.

You can only delete a synonyms set that is not in use by any index analyzer.

Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers.

Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded on the index recovery phase.

If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code.

To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set and using the reindex API to copy over the index data. Once finished, you can delete the index. When the synonyms set is no longer used in any analyzer, you will be able to delete it.
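
A hedged sketch of the happy path once no analyzer references the set (the client `es` and set ID are illustrative assumptions; a set still in use would instead trigger the 400 response described above):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    from elasticsearch import BadRequestError  # raised for HTTP 400 responses

    try:
        await es.synonyms.delete_synonym(id="my-synonyms-set")  # hypothetical set ID
    except BadRequestError:
        pass  # the set is still used by an index analyzer; migrate those indices first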

``_ :param id: The synonyms set identifier to delete. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_synonyms/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="synonyms.delete_synonym", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_synonym_rule( self, *, set_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a synonym rule. Delete a synonym rule from a synonym set.

``_ :param set_id: The ID of the synonym set to update. :param rule_id: The ID of the synonym rule to delete. :param refresh: If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the deleted synonym rule """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "set_id": _quote(set_id), "rule_id": _quote(rule_id), } __path = f'/_synonyms/{__path_parts["set_id"]}/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="synonyms.delete_synonym_rule", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_synonym( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a synonym set.

``_ :param id: The synonyms set identifier to retrieve. :param from_: The starting offset for query rules to retrieve. :param size: The max number of query rules to retrieve. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_synonyms/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="synonyms.get_synonym", path_parts=__path_parts, ) @_rewrite_parameters() async def get_synonym_rule( self, *, set_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a synonym rule. Get a synonym rule from a synonym set.

``_ :param set_id: The ID of the synonym set to retrieve the synonym rule from. :param rule_id: The ID of the synonym rule to retrieve. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "set_id": _quote(set_id), "rule_id": _quote(rule_id), } __path = f'/_synonyms/{__path_parts["set_id"]}/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="synonyms.get_synonym_rule", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_synonyms_sets( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all synonym sets. Get a summary of all defined synonym sets.

``_ :param from_: The starting offset for synonyms sets to retrieve. :param size: The maximum number of synonyms sets to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_synonyms" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="synonyms.get_synonyms_sets", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("synonyms_set",), ) async def put_synonym( self, *, id: str, synonyms_set: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets.

When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.

For practical examples of how to create or update a synonyms set, refer to the External documentation.
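
A hedged sketch of creating a small set (the client `es`, set ID, and rules are illustrative assumptions; each rule uses Solr synonym syntax):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.synonyms.put_synonym(
        id="my-synonyms-set",  # hypothetical set ID
        synonyms_set=[
            {"id": "rule-1", "synonyms": "hello, hi"},
            {"id": "rule-2", "synonyms": "bye => goodbye"},
        ],
    )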

``_ :param id: The ID of the synonyms set to be created or updated. :param synonyms_set: The synonym rules definitions for the synonyms set. :param refresh: If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym set """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if synonyms_set is None and body is None: raise ValueError("Empty value passed for parameter 'synonyms_set'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_synonyms/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if synonyms_set is not None: __body["synonyms_set"] = synonyms_set __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="synonyms.put_synonym", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("synonyms",), ) async def put_synonym_rule( self, *, set_id: str, rule_id: str, synonyms: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a synonym rule. Create or update a synonym rule in a synonym set.

If any of the synonym rules included is invalid, the API returns an error.

When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.
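
A hedged sketch (the client `es`, set ID, and rule ID are illustrative assumptions; the rule body must be in Solr format):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.synonyms.put_synonym_rule(
        set_id="my-synonyms-set",  # hypothetical set ID
        rule_id="rule-1",          # hypothetical rule ID
        synonyms="hello, hi, howdy",
        refresh=True,              # wait for analyzers to reload before returning
    )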

``_ :param set_id: The ID of the synonym set. :param rule_id: The ID of the synonym rule to be updated or created. :param synonyms: The synonym rule information definition, which must be in Solr format. :param refresh: If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym rule """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") if synonyms is None and body is None: raise ValueError("Empty value passed for parameter 'synonyms'") __path_parts: t.Dict[str, str] = { "set_id": _quote(set_id), "rule_id": _quote(rule_id), } __path = f'/_synonyms/{__path_parts["set_id"]}/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if synonyms is not None: __body["synonyms"] = synonyms __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="synonyms.put_synonym_rule", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/tasks.py000066400000000000000000000325471506101734100247270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class TasksClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def cancel( self, *, task_id: t.Optional[str] = None, actions: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel a task.

WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.

A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.

To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the ?detailed parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.
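
A hedged sketch (the client `es` and task identifier are illustrative assumptions; real identifiers come from the task list API):

    # Sketch only; `es` is assumed to be an AsyncElasticsearch instance.
    resp = await es.tasks.cancel(
        task_id="oTUltX4IQMOUUVeiohTt8A:12345",  # hypothetical node_id:task_number
        wait_for_completion=False,               # don't block on the cancellation
    )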

``_ :param task_id: The task identifier. :param actions: A comma-separated list or wildcard expression of actions that is used to limit the request. :param nodes: A comma-separated list of node IDs or names that is used to limit the request. :param parent_task_id: A parent task ID that is used to limit the tasks. :param wait_for_completion: If true, the request blocks until all found tasks are complete. """ __path_parts: t.Dict[str, str] if task_id not in SKIP_IN_PATH: __path_parts = {"task_id": _quote(task_id)} __path = f'/_tasks/{__path_parts["task_id"]}/_cancel' else: __path_parts = {} __path = "/_tasks/_cancel" __query: t.Dict[str, t.Any] = {} if actions is not None: __query["actions"] = actions if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="tasks.cancel", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def get( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get task information. Get information about a task currently running in the cluster.

WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.

If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.
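
A minimal usage sketch (the `es` AsyncElasticsearch instance and the task identifier are illustrative assumptions):

    # Fetch a hypothetical task and wait up to 30 seconds for it to complete.
    resp = await es.tasks.get(
        task_id="oTUltX4IQMOUUVeiohTt8A:12345",
        wait_for_completion=True,
        timeout="30s",
    )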

``_ :param task_id: The task identifier. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_completion: If `true`, the request blocks until the task has completed. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_tasks/{__path_parts["task_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="tasks.get", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) async def list( self, *, actions: t.Optional[t.Union[str, t.Sequence[str]]] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, group_by: t.Optional[ t.Union[str, t.Literal["nodes", "none", "parents"]] ] = None, human: t.Optional[bool] = None, nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster.

WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.

Identifying running tasks

The X-Opaque-Id header, when provided in the HTTP request, is returned as a header in the response as well as in the headers field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them. For example:

curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
          

The API returns the following result:

HTTP/1.1 200 OK
          X-Opaque-Id: 123456
          content-type: application/json; charset=UTF-8
          content-length: 831

          {
            "tasks" : {
              "u5lcZHqcQhu-rUoFaqDphA:45" : {
                "node" : "u5lcZHqcQhu-rUoFaqDphA",
                "id" : 45,
                "type" : "transport",
                "action" : "cluster:monitor/tasks/lists",
                "start_time_in_millis" : 1513823752749,
                "running_time_in_nanos" : 293139,
                "cancellable" : false,
                "headers" : {
                  "X-Opaque-Id" : "123456"
                },
                "children" : [
                  {
                    "node" : "u5lcZHqcQhu-rUoFaqDphA",
                    "id" : 46,
                    "type" : "direct",
                    "action" : "cluster:monitor/tasks/lists[n]",
                    "start_time_in_millis" : 1513823752750,
                    "running_time_in_nanos" : 92133,
                    "cancellable" : false,
                    "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
                    "headers" : {
                      "X-Opaque-Id" : "123456"
                    }
                  }
                ]
              }
            }
          }
          

In this example, X-Opaque-Id: 123456 is the ID returned as part of the response header. The X-Opaque-Id in the task headers is the ID of the task that was initiated by the REST request. The X-Opaque-Id in the children headers identifies the child task of the task that was initiated by the REST request.
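
The same request can be issued with this client; the options(opaque_id=...) helper sets the X-Opaque-Id header (the `es` AsyncElasticsearch instance is an illustrative assumption):

    # Equivalent of the curl example above, grouping tasks by parent task.
    resp = await es.options(opaque_id="123456").tasks.list(group_by="parents")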

``_ :param actions: A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluser:*` to retrieve all cluster-related tasks. :param detailed: If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run. :param group_by: A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks. :param nodes: A comma-separated list of node IDs or names that is used to limit the returned information. :param parent_task_id: A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code. :param timeout: The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property. :param wait_for_completion: If `true`, the request blocks until the operation is complete. """ __path_parts: t.Dict[str, str] = {} __path = "/_tasks" __query: t.Dict[str, t.Any] = {} if actions is not None: __query["actions"] = actions if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if group_by is not None: __query["group_by"] = group_by if human is not None: __query["human"] = human if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="tasks.list", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/text_structure.py000066400000000000000000001173311506101734100267010ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class TextStructureClient(NamespacedClient): @_rewrite_parameters() async def find_field_structure( self, *, field: str, index: str, column_names: t.Optional[str] = None, delimiter: t.Optional[str] = None, documents_to_sample: t.Optional[int] = None, ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] ] ] = None, grok_pattern: t.Optional[str] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, quote: t.Optional[str] = None, should_trim_fields: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, timestamp_field: t.Optional[str] = None, timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find the structure of a text field. Find the structure of a text field in an Elasticsearch index.

This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just @timestamp and message fields, you can use this API to see what common structure exists in the message field.

The response from the API contains:

  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen.
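
A minimal usage sketch (the `es` AsyncElasticsearch instance, index name, and field name are illustrative assumptions):

    # Analyze the "message" field of a hypothetical logs index.
    resp = await es.text_structure.find_field_structure(
        index="test-logs",
        field="message",
        documents_to_sample=1000,
        ecs_compatibility="v1",
    )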

``_ :param field: The field that should be analyzed. :param index: The name of the index that contains the analyzed field. :param column_names: If `format` is set to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. :param delimiter: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. :param documents_to_sample: The number of documents to include in the structural analysis. The minimum value is 2. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. :param explain: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. :param format: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. :param grok_pattern: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. :param quote: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. 
If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. :param timestamp_format: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. 
""" if field is None: raise ValueError("Empty value passed for parameter 'field'") if index is None: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/find_field_structure" __query: t.Dict[str, t.Any] = {} if field is not None: __query["field"] = field if index is not None: __query["index"] = index if column_names is not None: __query["column_names"] = column_names if delimiter is not None: __query["delimiter"] = delimiter if documents_to_sample is not None: __query["documents_to_sample"] = documents_to_sample if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if error_trace is not None: __query["error_trace"] = error_trace if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if grok_pattern is not None: __query["grok_pattern"] = grok_pattern if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if quote is not None: __query["quote"] = quote if should_trim_fields is not None: __query["should_trim_fields"] = should_trim_fields if timeout is not None: __query["timeout"] = timeout if timestamp_field is not None: __query["timestamp_field"] = timestamp_field if timestamp_format is not None: __query["timestamp_format"] = timestamp_format __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="text_structure.find_field_structure", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("messages",), ) async def find_message_structure( self, *, messages: t.Optional[t.Sequence[str]] = None, column_names: t.Optional[str] = None, delimiter: t.Optional[str] = None, ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] ] ] = None, grok_pattern: t.Optional[str] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, quote: t.Optional[str] = None, should_trim_fields: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, timestamp_field: t.Optional[str] = None, timestamp_format: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch.

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.

The response from the API contains:

  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen.
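
A minimal usage sketch (the `es` AsyncElasticsearch instance and the sample log lines are illustrative assumptions):

    # Analyze a small batch of pre-split log messages.
    resp = await es.text_structure.find_message_structure(
        messages=[
            "[2024-03-05T10:52:36,256] Node n1 started",
            "[2024-03-05T10:52:41,038] Cluster health status changed to GREEN",
        ],
    )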

``_ :param messages: The list of messages you want to analyze. :param column_names: If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. :param delimiter: If you the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. :param explain: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. :param format: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. :param grok_pattern: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. :param quote: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp of each record in the text. 
In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. :param timestamp_format: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation is also permitted with the exception a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. 
""" if messages is None and body is None: raise ValueError("Empty value passed for parameter 'messages'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/find_message_structure" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if column_names is not None: __query["column_names"] = column_names if delimiter is not None: __query["delimiter"] = delimiter if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if error_trace is not None: __query["error_trace"] = error_trace if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if grok_pattern is not None: __query["grok_pattern"] = grok_pattern if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if quote is not None: __query["quote"] = quote if should_trim_fields is not None: __query["should_trim_fields"] = should_trim_fields if timeout is not None: __query["timeout"] = timeout if timestamp_field is not None: __query["timestamp_field"] = timestamp_field if timestamp_format is not None: __query["timestamp_format"] = timestamp_format if not __body: if messages is not None: __body["messages"] = messages __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="text_structure.find_message_structure", path_parts=__path_parts, ) @_rewrite_parameters( body_name="text_files", ) async def find_structure( self, *, text_files: t.Optional[t.Sequence[t.Any]] = None, body: t.Optional[t.Sequence[t.Any]] = None, charset: t.Optional[str] = None, column_names: t.Optional[str] = None, delimiter: t.Optional[str] = None, ecs_compatibility: t.Optional[str] = None, explain: t.Optional[bool] = None, format: t.Optional[str] = None, grok_pattern: t.Optional[str] = None, has_header_row: t.Optional[bool] = None, line_merge_size_limit: t.Optional[int] = None, lines_to_sample: t.Optional[int] = None, quote: t.Optional[str] = None, should_trim_fields: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, timestamp_field: t.Optional[str] = None, timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary file formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 MB.

The response from the API contains:

  • A couple of messages from the beginning of the text.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
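
A minimal usage sketch (the `es` AsyncElasticsearch instance and the sample records are illustrative assumptions; the client serializes the sequence as NDJSON, which is the content type this endpoint sends):

    # Analyze two NDJSON records; lines_to_sample must be at least 2.
    resp = await es.text_structure.find_structure(
        text_files=[
            {"name": "Leviathan Wakes", "author": "James S.A. Corey"},
            {"name": "Hyperion", "author": "Dan Simmons"},
        ],
        lines_to_sample=2,
    )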

``_ :param text_files: :param charset: The text's character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. :param column_names: If you have set format to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header role, columns are named "column1", "column2", "column3", for example. :param delimiter: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings rename these fields before using it. :param explain: If this parameter is set to `true`, the response includes a field named explanation, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. :param format: The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. :param grok_pattern: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. :param has_header_row: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. :param line_merge_size_limit: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. 
If you have extremely long messages you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. :param lines_to_sample: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. NOTE: The number of lines and the variation of the lines affects the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. :param quote: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. :param should_trim_fields: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires then it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. :param timestamp_format: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and separated from the `ss` by a `.`, `,` or `:`. Spacing and punctuation is also permitted with the exception of `?`, newline and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. 
One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text this will result in the structure finder treating the text as single-line messages. """ if text_files is None and body is None: raise ValueError( "Empty value passed for parameters 'text_files' and 'body', one of them should be set." ) elif text_files is not None and body is not None: raise ValueError("Cannot set both 'text_files' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/find_structure" __query: t.Dict[str, t.Any] = {} if charset is not None: __query["charset"] = charset if column_names is not None: __query["column_names"] = column_names if delimiter is not None: __query["delimiter"] = delimiter if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if explain is not None: __query["explain"] = explain if format is not None: __query["format"] = format if grok_pattern is not None: __query["grok_pattern"] = grok_pattern if has_header_row is not None: __query["has_header_row"] = has_header_row if line_merge_size_limit is not None: __query["line_merge_size_limit"] = line_merge_size_limit if lines_to_sample is not None: __query["lines_to_sample"] = lines_to_sample if quote is not None: __query["quote"] = quote if should_trim_fields is not None: __query["should_trim_fields"] = should_trim_fields if timeout is not None: __query["timeout"] = timeout if timestamp_field is not None: __query["timestamp_field"] = timestamp_field if timestamp_format is not None: __query["timestamp_format"] = timestamp_format __body = text_files if text_files is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="text_structure.find_structure", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("grok_pattern", "text"), ) async def test_grok_pattern( self, *, grok_pattern: t.Optional[str] = None, text: t.Optional[t.Sequence[str]] = None, ecs_compatibility: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
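
A minimal usage sketch (the `es` AsyncElasticsearch instance and the sample pattern and text are illustrative assumptions):

    # Check which of the two lines match the pattern.
    resp = await es.text_structure.test_grok_pattern(
        grok_pattern="Hello %{WORD:first_name} %{WORD:last_name}",
        text=["Hello John Doe", "this line does not match"],
    )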

``_ :param grok_pattern: The Grok pattern to run on the text. :param text: The lines of text to run the Grok pattern on. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. """ if grok_pattern is None and body is None: raise ValueError("Empty value passed for parameter 'grok_pattern'") if text is None and body is None: raise ValueError("Empty value passed for parameter 'text'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/test_grok_pattern" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if grok_pattern is not None: __body["grok_pattern"] = grok_pattern if text is not None: __body["text"] = text __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="text_structure.test_grok_pattern", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/transform.py000066400000000000000000001337311506101734100256120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class TransformClient(NamespacedClient): @_rewrite_parameters() async def delete_transform( self, *, transform_id: str, delete_dest_index: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a transform.
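
A minimal usage sketch (the `es` AsyncElasticsearch instance and the transform identifier are illustrative assumptions):

    # Delete a stopped transform and keep its destination index.
    resp = await es.transform.delete_transform(
        transform_id="ecommerce-customer-transform",
        delete_dest_index=False,
    )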

``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted :param force: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}' __query: t.Dict[str, t.Any] = {} if delete_dest_index is not None: __query["delete_dest_index"] = delete_dest_index if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="transform.delete_transform", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_transform( self, *, transform_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get transforms. Get configuration information for transforms.
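
A minimal usage sketch (the `es` AsyncElasticsearch instance is an illustrative assumption):

    # List the first ten transforms, matching all identifiers.
    resp = await es.transform.get_transform(transform_id="*", size=10)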

``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. :param exclude_generated: Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. :param from_: Skips the specified number of transforms. :param size: Specifies the maximum number of transforms to obtain. """ __path_parts: t.Dict[str, str] if transform_id not in SKIP_IN_PATH: __path_parts = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}' else: __path_parts = {} __path = "/_transform" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="transform.get_transform", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def get_transform_stats( self, *, transform_id: t.Union[str, t.Sequence[str]], allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get transform stats.

Get usage information for transforms.
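
A minimal usage sketch (the `es` AsyncElasticsearch instance is an illustrative assumption):

    # Fetch stats for all transforms without failing when none exist.
    resp = await es.transform.get_transform_stats(
        transform_id="_all",
        allow_no_match=True,
    )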

``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the ``, or by omitting the ``. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. :param from_: Skips the specified number of transforms. :param size: Specifies the maximum number of transforms to obtain. :param timeout: Controls the time to wait for the stats """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_stats' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="transform.get_transform_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "dest", "frequency", "latest", "pivot", "retention_policy", "settings", "source", "sync", ), ) async def preview_transform( self, *, transform_id: t.Optional[str] = None, description: t.Optional[str] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, latest: t.Optional[t.Mapping[str, t.Any]] = None, pivot: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, retention_policy: t.Optional[t.Mapping[str, t.Any]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, sync: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration.

It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.
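
A minimal usage sketch of previewing a pivot transform (the `es` AsyncElasticsearch instance and the source index are illustrative assumptions):

    # Preview a pivot that groups documents by customer and finds the max price.
    resp = await es.transform.preview_transform(
        source={"index": "kibana_sample_data_ecommerce"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"max_price": {"max": {"field": "taxful_total_price"}}},
        },
    )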

``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. :param description: Free text description of the transform. :param dest: The destination for the transform. :param frequency: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. :param latest: The latest method transforms the data by finding the latest document for each unique key. :param pivot: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. :param retention_policy: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. :param settings: Defines optional transform settings. :param source: The source of the data for the transform. :param sync: Defines the properties transforms require to run continuously. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if transform_id not in SKIP_IN_PATH: __path_parts = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_preview' else: __path_parts = {} __path = "/_transform/_preview" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if description is not None: __body["description"] = description if dest is not None: __body["dest"] = dest if frequency is not None: __body["frequency"] = frequency if latest is not None: __body["latest"] = latest if pivot is not None: __body["pivot"] = pivot if retention_policy is not None: __body["retention_policy"] = retention_policy if settings is not None: __body["settings"] = settings if source is not None: __body["source"] = source if sync is not None: __body["sync"] = sync if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="transform.preview_transform", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "dest", "source", "description", "frequency", "latest", "meta", "pivot", "retention_policy", "settings", "sync", ), parameter_aliases={"_meta": "meta"}, ) async def put_transform( self, *, transform_id: str, dest: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, defer_validation: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, latest: t.Optional[t.Mapping[str, t.Any]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pivot: 
t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, retention_policy: t.Optional[t.Mapping[str, t.Any]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, sync: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a transform.

A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity.

You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of group_by fields in the pivot object. If you choose to use the latest method, the entities are defined by the unique_key field values in the latest object.

You must have create_index, index, and read privileges on the destination index and read and view_index_metadata privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.

NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any .transform-internal* indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on .transform-internal* indices. If you used transforms prior to 7.5, also do not give users any privileges on .data-frame-internal* indices.
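
A minimal usage sketch of creating a continuous pivot transform (the `es` AsyncElasticsearch instance, index names, and field names are illustrative assumptions):

    # Group orders by customer and keep a running total per customer.
    resp = await es.transform.put_transform(
        transform_id="ecommerce-customer-transform",
        source={"index": "kibana_sample_data_ecommerce"},
        dest={"index": "ecommerce-customers"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spent": {"sum": {"field": "taxful_total_price"}}},
        },
        sync={"time": {"field": "order_date", "delay": "60s"}},
        frequency="5m",
    )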

``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. :param dest: The destination for the transform. :param source: The source of the data for the transform. :param defer_validation: When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. :param description: Free text description of the transform. :param frequency: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. :param latest: The latest method transforms the data by finding the latest document for each unique key. :param meta: Defines optional transform metadata. :param pivot: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. :param retention_policy: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. :param settings: Defines optional transform settings. :param sync: Defines the properties transforms require to run continuously. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
""" if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") if dest is None and body is None: raise ValueError("Empty value passed for parameter 'dest'") if source is None and body is None: raise ValueError("Empty value passed for parameter 'source'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if defer_validation is not None: __query["defer_validation"] = defer_validation if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if dest is not None: __body["dest"] = dest if source is not None: __body["source"] = source if description is not None: __body["description"] = description if frequency is not None: __body["frequency"] = frequency if latest is not None: __body["latest"] = latest if meta is not None: __body["_meta"] = meta if pivot is not None: __body["pivot"] = pivot if retention_policy is not None: __body["retention_policy"] = retention_policy if settings is not None: __body["settings"] = settings if sync is not None: __body["sync"] = sync __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="transform.put_transform", path_parts=__path_parts, ) @_rewrite_parameters() async def reset_transform( self, *, transform_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reset a transform.

Before you can reset it, you must stop it; alternatively, use the force query parameter. If the destination index was created by the transform, it is deleted.
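
A brief sketch, reusing the assumed ``client`` instance and placeholder transform ID from the example above:

```python
# Stop the transform first, then reset it; alternatively, pass force=True
# to reset it regardless of its current state.
await client.transform.stop_transform(
    transform_id="orders-by-customer", wait_for_completion=True
)
await client.transform.reset_transform(transform_id="orders-by-customer")
```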

``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. :param force: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_reset' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.reset_transform", path_parts=__path_parts, ) @_rewrite_parameters() async def schedule_now_transform( self, *, transform_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Schedule a transform to start now.

Instantly run a transform to process data. If you run this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at now + frequency unless the API is called again in the meantime.
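
A minimal sketch, under the same assumptions as the examples above:

```python
# Process new data now instead of waiting for the next `frequency` interval.
await client.transform.schedule_now_transform(
    transform_id="orders-by-customer", timeout="30s"
)
```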

``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_schedule_now' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.schedule_now_transform", path_parts=__path_parts, ) @_rewrite_parameters() async def set_upgrade_mode( self, *, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set upgrade_mode for transform indices. Sets a cluster-wide upgrade_mode setting that prepares transform indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your transform indices. In those circumstances, there must be no transforms running. You can close the transforms, do the upgrade, then open all the transforms again. Alternatively, you can use this API to temporarily halt tasks associated with the transforms and prevent new transforms from opening. You can also use this API during upgrades that do not require you to reindex your transform indices, though stopping transforms is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get transform info API.
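
A short sketch of the typical enable/disable cycle, assuming the same ``client`` instance:

```python
# Halt all transform tasks before restarting nodes or reindexing...
await client.transform.set_upgrade_mode(enabled=True, timeout="1m")
# ...perform the upgrade work, then let transforms resume.
await client.transform.set_upgrade_mode(enabled=False, timeout="1m")
```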

``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all transform tasks and prohibits new transform tasks from starting. :param timeout: The time to wait for the request to be completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_transform/set_upgrade_mode" __query: t.Dict[str, t.Any] = {} if enabled is not None: __query["enabled"] = enabled if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.set_upgrade_mode", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) async def start_transform( self, *, transform_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[str] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a transform.

When you start a transform, it creates the destination index if it does not already exist. The number_of_shards is set to 1 and the auto_expand_replicas is set to 0-1. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of scripted_metric or bucket_script aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform.

When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform, with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.
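
A minimal sketch, under the same assumptions as above; the `from_` value is only meaningful for continuous transforms:

```python
# Start the transform; for a continuous transform, optionally restrict it
# to entities changed after a relative point in time.
await client.transform.start_transform(
    transform_id="orders-by-customer", from_="now-30d", timeout="30s"
)
```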

``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_start' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.start_transform", path_parts=__path_parts, ) @_rewrite_parameters() async def stop_transform( self, *, transform_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_checkpoint: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop transforms. Stops one or more transforms; to stop all transforms, use `_all` or a wildcard expression as the identifier.
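
A minimal sketch, under the same assumptions as above:

```python
# Wait for the current checkpoint to complete and for the indexer to fully stop.
await client.transform.stop_transform(
    transform_id="orders-by-customer",
    wait_for_checkpoint=True,
    wait_for_completion=True,
)
```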

``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. If it is false, the request returns a 404 status code when there are no matches or only partial matches. :param force: If it is true, the API forcefully stops the transforms. :param timeout: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. :param wait_for_checkpoint: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. :param wait_for_completion: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_stop' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_checkpoint is not None: __query["wait_for_checkpoint"] = wait_for_checkpoint if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.stop_transform", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "dest", "frequency", "meta", "retention_policy", "settings", "source", "sync", ), parameter_aliases={"_meta": "meta"}, ) async def update_transform( self, *, transform_id: str, defer_validation: t.Optional[bool] = None, description: t.Optional[str] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, retention_policy: t.Optional[t.Union[None, t.Mapping[str, t.Any]]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, sync: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> 
ObjectApiResponse[t.Any]: """ .. raw:: html

Update a transform. Updates certain properties of a transform.

All updated properties except description do not take effect until after the transform starts the next checkpoint, thus there is data consistency in each checkpoint. To use this API, you must have read and view_index_metadata privileges for the source indices. You must also have index and read privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.
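
A minimal sketch, under the same assumptions as above:

```python
await client.transform.update_transform(
    transform_id="orders-by-customer",
    description="Aggregates order totals per customer",
    frequency="10m",
)
```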

``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. :param description: Free text description of the transform. :param dest: The destination for the transform. :param frequency: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. :param meta: Defines optional transform metadata. :param retention_policy: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. :param settings: Defines optional transform settings. :param source: The source of the data for the transform. :param sync: Defines the properties transforms require to run continuously. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if defer_validation is not None: __query["defer_validation"] = defer_validation if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if description is not None: __body["description"] = description if dest is not None: __body["dest"] = dest if frequency is not None: __body["frequency"] = frequency if meta is not None: __body["_meta"] = meta if retention_policy is not None: __body["retention_policy"] = retention_policy if settings is not None: __body["settings"] = settings if source is not None: __body["source"] = source if sync is not None: __body["sync"] = sync __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="transform.update_transform", path_parts=__path_parts, ) @_rewrite_parameters() async def upgrade_transforms( self, *, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Upgrade all transforms.

Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.

If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue, then re-run the process. A summary is returned when the upgrade is finished.

To ensure continuous transforms remain running during a major version upgrade of the cluster (for example, from 7.16 to 8.0), it is recommended to upgrade transforms before upgrading the cluster. You may also want to take a fresh cluster backup prior to the upgrade.
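
A minimal sketch, assuming the same ``client`` instance; the response fields shown reflect the documented summary counts:

```python
# Check what would be upgraded without changing anything...
resp = await client.transform.upgrade_transforms(dry_run=True)
# ...then run the actual upgrade.
resp = await client.transform.upgrade_transforms(dry_run=False, timeout="1m")
print(resp["needs_update"], resp["updated"], resp["no_action"])
```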

``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_transform/_upgrade" __query: t.Dict[str, t.Any] = {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.upgrade_transforms", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/utils.py000066400000000000000000000026511506101734100247330ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from ..._sync.client.utils import ( _TYPE_ASYNC_SNIFF_CALLBACK, _TYPE_HOSTS, CLIENT_META_SERVICE, SKIP_IN_PATH, Stability, _base64_auth_header, _quote, _quote_query, _rewrite_parameters, _stability_warning, client_node_configs, is_requests_http_auth, is_requests_node_class, ) __all__ = [ "CLIENT_META_SERVICE", "_TYPE_ASYNC_SNIFF_CALLBACK", "_base64_auth_header", "_quote", "_quote_query", "_TYPE_HOSTS", "SKIP_IN_PATH", "Stability", "client_node_configs", "_rewrite_parameters", "_stability_warning", "is_requests_http_auth", "is_requests_node_class", ] python-elasticsearch-9.1.1/elasticsearch/_async/client/watcher.py000066400000000000000000001126541506101734100252350ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class WatcherClient(NamespacedClient): @_rewrite_parameters() async def ack_watch( self, *, watch_id: str, action_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions.

The acknowledgement state of an action is stored in the status.actions.<id>.ack.state structure.

IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution.

Acknowledging an action throttles further executions of that action until its ack.state is reset to awaits_successful_execution. This happens when the condition of the watch is not met (the condition evaluates to false). To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to External documentation.
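
A minimal sketch, assuming an existing ``AsyncElasticsearch`` instance named ``client``; the watch and action IDs are placeholders:

```python
# Acknowledge one action; omit action_id to acknowledge all actions of the watch.
await client.watcher.ack_watch(
    watch_id="cluster_health_watch", action_id="email_admin"
)
```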

``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") __path_parts: t.Dict[str, str] if watch_id not in SKIP_IN_PATH and action_id not in SKIP_IN_PATH: __path_parts = { "watch_id": _quote(watch_id), "action_id": _quote(action_id), } __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_ack/{__path_parts["action_id"]}' elif watch_id not in SKIP_IN_PATH: __path_parts = {"watch_id": _quote(watch_id)} __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_ack' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="watcher.ack_watch", path_parts=__path_parts, ) @_rewrite_parameters() async def activate_watch( self, *, watch_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Activate a watch. A watch can be either active or inactive.

``_ :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") __path_parts: t.Dict[str, str] = {"watch_id": _quote(watch_id)} __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_activate' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="watcher.activate_watch", path_parts=__path_parts, ) @_rewrite_parameters() async def deactivate_watch( self, *, watch_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Deactivate a watch. A watch can be either active or inactive.

``_ :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") __path_parts: t.Dict[str, str] = {"watch_id": _quote(watch_id)} __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_deactivate' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="watcher.deactivate_watch", path_parts=__path_parts, ) @_rewrite_parameters() async def delete_watch( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a watch. When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.

Deleting a watch does not delete any watch execution records related to this watch from the watch history.

IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the .watches index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.

``_ :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="watcher.delete_watch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "action_modes", "alternative_input", "ignore_condition", "record_execution", "simulated_actions", "trigger_data", "watch", ), ) async def execute_watch( self, *, id: t.Optional[str] = None, action_modes: t.Optional[ t.Mapping[ str, t.Union[ str, t.Literal[ "execute", "force_execute", "force_simulate", "simulate", "skip" ], ], ] ] = None, alternative_input: t.Optional[t.Mapping[str, t.Any]] = None, debug: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_condition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, record_execution: t.Optional[bool] = None, simulated_actions: t.Optional[t.Mapping[str, t.Any]] = None, trigger_data: t.Optional[t.Mapping[str, t.Any]] = None, watch: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.

For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.

You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.

When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index a, but not index b, then the exact same set of rules will apply during execution of a watch.

When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of the information about the user who stored the watch. Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
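
A minimal sketch, under the same assumptions as above:

```python
# Force a run that ignores the condition and is not recorded in watch history.
resp = await client.watcher.execute_watch(
    id="cluster_health_watch",
    ignore_condition=True,
    record_execution=False,
)
print(resp["watch_record"]["state"])
```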

``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the watch execution. :param alternative_input: When present, the watch uses this object as a payload instead of executing its own input. :param debug: Defines whether the watch runs in debug mode. :param ignore_condition: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. :param record_execution: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. :param simulated_actions: :param trigger_data: This structure is parsed as the data of the trigger event that will be used during the watch execution. :param watch: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}/_execute' else: __path_parts = {} __path = "/_watcher/watch/_execute" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if debug is not None: __query["debug"] = debug if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if action_modes is not None: __body["action_modes"] = action_modes if alternative_input is not None: __body["alternative_input"] = alternative_input if ignore_condition is not None: __body["ignore_condition"] = ignore_condition if record_execution is not None: __body["record_execution"] = record_execution if simulated_actions is not None: __body["simulated_actions"] = simulated_actions if trigger_data is not None: __body["trigger_data"] = trigger_data if watch is not None: __body["watch"] = watch if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.execute_watch", path_parts=__path_parts, ) @_rewrite_parameters() async def get_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get Watcher index settings. Get settings for the Watcher internal index (.watches). Only a subset of settings is shown, for example index.auto_expand_replicas and index.number_of_replicas.

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/settings" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="watcher.get_settings", path_parts=__path_parts, ) @_rewrite_parameters() async def get_watch( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a watch.

``_ :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="watcher.get_watch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "actions", "condition", "input", "metadata", "throttle_period", "throttle_period_in_millis", "transform", "trigger", ), ) async def put_watch( self, *, id: str, actions: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, active: t.Optional[bool] = None, condition: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, input: t.Optional[t.Mapping[str, t.Any]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, throttle_period: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, throttle_period_in_millis: t.Optional[t.Any] = None, transform: t.Optional[t.Mapping[str, t.Any]] = None, trigger: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a watch. When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine. Typically for the schedule trigger, the scheduler is the trigger engine.

IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the .watches index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the .watches index.

When you add a watch you can also define its initial active state by setting the active parameter.

When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index a, but not index b, the same will apply when the watch runs.
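
A minimal sketch of registering a watch; the watch ID, endpoint details, and action are placeholders, and the ``client`` instance is assumed as above:

```python
await client.watcher.put_watch(
    id="cluster_health_watch",
    active=True,
    trigger={"schedule": {"interval": "10m"}},
    input={
        "http": {
            "request": {"host": "localhost", "port": 9200, "path": "/_cluster/health"}
        }
    },
    condition={"compare": {"ctx.payload.status": {"eq": "red"}}},
    actions={"log_error": {"logging": {"text": "Cluster health is RED"}}},
)
```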

``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. :param active: The initial state of the watch. The default value is `true`, which means the watch is active by default. :param condition: The condition that defines if the actions should be run. :param if_primary_term: only update the watch if the last operation that has changed the watch has the specified primary term :param if_seq_no: only update the watch if the last operation that has changed the watch has the specified sequence number :param input: The input that defines the input that loads the data for the watch. :param metadata: Metadata JSON that will be copied into the history entries. :param throttle_period: The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. :param throttle_period_in_millis: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. :param transform: The transform that processes the watch payload to prepare it for the watch actions. :param trigger: The trigger that defines when the watch should run. :param version: Explicit version number for concurrency control """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if active is not None: __query["active"] = active if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if pretty is not None: __query["pretty"] = pretty if version is not None: __query["version"] = version if not __body: if actions is not None: __body["actions"] = actions if condition is not None: __body["condition"] = condition if input is not None: __body["input"] = input if metadata is not None: __body["metadata"] = metadata if throttle_period is not None: __body["throttle_period"] = throttle_period if throttle_period_in_millis is not None: __body["throttle_period_in_millis"] = throttle_period_in_millis if transform is not None: __body["transform"] = transform if trigger is not None: __body["trigger"] = trigger if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.put_watch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("from_", "query", "search_after", "size", "sort"), parameter_aliases={"from": "from_"}, ) async def query_watches( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] 
= None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query.

Note that only the _id and metadata.* fields are queryable or sortable.
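
A minimal sketch, under the same assumptions as above; remember that only `_id` and `metadata.*` can be queried or sorted on:

```python
resp = await client.watcher.query_watches(
    query={"term": {"_id": "cluster_health_watch"}},
    sort=[{"_id": {"order": "asc"}}],
    size=10,
)
for watch in resp["watches"]:
    print(watch["_id"])
```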

``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. :param search_after: Retrieve the next page of hits using a set of sort values from the previous page. :param size: The number of hits to return. It must be non-negative. :param sort: One or more fields used to sort the search results. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_query/watches" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.query_watches", path_parts=__path_parts, ) @_rewrite_parameters() async def start( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start the watch service. Start the Watcher service if it is not already running.

``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_start" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="watcher.start", path_parts=__path_parts, ) @_rewrite_parameters() async def stats( self, *, metric: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "_all", "current_watches", "pending_watches", "queued_watches", ], ] ], t.Union[ str, t.Literal[ "_all", "current_watches", "pending_watches", "queued_watches" ], ], ] ] = None, emit_stacktraces: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get Watcher statistics. This API always returns basic metrics. You can retrieve more metrics by using the metric parameter.

``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each watch that is running. """ __path_parts: t.Dict[str, str] if metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_watcher/stats/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_watcher/stats" __query: t.Dict[str, t.Any] = {} if emit_stacktraces is not None: __query["emit_stacktraces"] = emit_stacktraces if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="watcher.stats", path_parts=__path_parts, ) @_rewrite_parameters() async def stop( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop the watch service. Stop the Watcher service if it is running.

``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_stop" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="watcher.stop", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("index_auto_expand_replicas", "index_number_of_replicas"), parameter_aliases={ "index.auto_expand_replicas": "index_auto_expand_replicas", "index.number_of_replicas": "index_number_of_replicas", }, ) async def update_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_auto_expand_replicas: t.Optional[str] = None, index_number_of_replicas: t.Optional[int] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update Watcher index settings. Update settings for the Watcher internal index (.watches). Only a subset of settings can be modified. This includes index.auto_expand_replicas, index.number_of_replicas, index.routing.allocation.exclude.*, index.routing.allocation.include.* and index.routing.allocation.require.*. Modification of index.routing.allocation.include._tier_preference is an exception and is not allowed as the Watcher shards must always be in the data_content tier.
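
A minimal sketch, under the same assumptions as above; note that the dotted body fields are exposed as the aliased keyword arguments of this method:

```python
await client.watcher.update_settings(
    index_auto_expand_replicas="0-4",
    timeout="30s",
)
```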

``_ :param index_auto_expand_replicas: :param index_number_of_replicas: :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if index_auto_expand_replicas is not None: __body["index.auto_expand_replicas"] = index_auto_expand_replicas if index_number_of_replicas is not None: __body["index.number_of_replicas"] = index_number_of_replicas __headers = {"accept": "application/json", "content-type": "application/json"} return await self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.update_settings", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/client/xpack.py000066400000000000000000000120161506101734100246750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class XPackClient(NamespacedClient): def __getattr__(self, attr_name: str) -> t.Any: return getattr(self.client, attr_name) # AUTO-GENERATED-API-DEFINITIONS # @_rewrite_parameters() async def info( self, *, accept_enterprise: t.Optional[bool] = None, categories: t.Optional[ t.Sequence[t.Union[str, t.Literal["build", "features", "license"]]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get information. The information provided by the API includes:

  • Build information including the build number and timestamp.
  • License information about the currently installed license.
  • Feature information for the features that are currently enabled and available under the current license.
``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include in the response. For example, `build,license,features`. """ __path_parts: t.Dict[str, str] = {} __path = "/_xpack" __query: t.Dict[str, t.Any] = {} if accept_enterprise is not None: __query["accept_enterprise"] = accept_enterprise if categories is not None: __query["categories"] = categories if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="xpack.info", path_parts=__path_parts, ) @_rewrite_parameters() async def usage( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
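
A minimal sketch, assuming an existing ``AsyncElasticsearch`` instance named ``client``; the exact feature keys in the response depend on the cluster's license and version:

```python
resp = await client.xpack.usage(master_timeout="30s")
# Each top-level key is a feature block with availability and usage details.
print(resp["watcher"]["available"], resp["watcher"]["enabled"])
```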

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_xpack/usage" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return await self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="xpack.usage", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_async/helpers.py000066400000000000000000000544211506101734100237610ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import asyncio import logging from typing import ( Any, AsyncIterable, AsyncIterator, Callable, Collection, Dict, Iterable, List, MutableMapping, Optional, Tuple, TypeVar, Union, ) from ..exceptions import ApiError, NotFoundError, TransportError from ..helpers.actions import ( _TYPE_BULK_ACTION, _TYPE_BULK_ACTION_BODY, _TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_HEADER_AND_BODY, _ActionChunker, _process_bulk_chunk_error, _process_bulk_chunk_success, expand_action, ) from ..helpers.errors import ScanError from ..serializer import Serializer from .client import AsyncElasticsearch # noqa logger = logging.getLogger("elasticsearch.helpers") T = TypeVar("T") async def _chunk_actions( actions: AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY], chunk_size: int, max_chunk_bytes: int, serializer: Serializer, ) -> AsyncIterable[ Tuple[ List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], List[bytes], ] ]: """ Split actions into chunks by number or size, serialize them into strings in the process. """ chunker = _ActionChunker( chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer ) async for action, data in actions: ret = chunker.feed(action, data) if ret: yield ret ret = chunker.flush() if ret: yield ret async def _process_bulk_chunk( client: AsyncElasticsearch, bulk_actions: List[bytes], bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], raise_on_exception: bool = True, raise_on_error: bool = True, ignore_status: Union[int, Collection[int]] = (), *args: Any, **kwargs: Any, ) -> AsyncIterable[Tuple[bool, Dict[str, Any]]]: """ Send a bulk request to elasticsearch and process the output. 
""" if isinstance(ignore_status, int): ignore_status = (ignore_status,) try: # send the actual request resp = await client.bulk(*args, operations=bulk_actions, **kwargs) # type: ignore[arg-type] except ApiError as e: gen = _process_bulk_chunk_error( error=e, bulk_data=bulk_data, ignore_status=ignore_status, raise_on_exception=raise_on_exception, raise_on_error=raise_on_error, ) else: gen = _process_bulk_chunk_success( resp=resp.body, bulk_data=bulk_data, ignore_status=ignore_status, raise_on_error=raise_on_error, ) for item in gen: yield item def aiter(x: Union[Iterable[T], AsyncIterable[T]]) -> AsyncIterator[T]: """Turns an async iterable or iterable into an async iterator""" if hasattr(x, "__anext__"): return x # type: ignore[return-value] elif hasattr(x, "__aiter__"): return x.__aiter__() async def f() -> AsyncIterable[T]: ix: Iterable[T] = x for item in ix: yield item return f().__aiter__() async def azip( *iterables: Union[Iterable[T], AsyncIterable[T]] ) -> AsyncIterable[Tuple[T, ...]]: """Zips async iterables and iterables into an async iterator with the same behavior as zip() """ aiters = [aiter(x) for x in iterables] try: while True: yield tuple([await x.__anext__() for x in aiters]) except StopAsyncIteration: pass async def async_streaming_bulk( client: AsyncElasticsearch, actions: Union[Iterable[_TYPE_BULK_ACTION], AsyncIterable[_TYPE_BULK_ACTION]], chunk_size: int = 500, max_chunk_bytes: int = 100 * 1024 * 1024, raise_on_error: bool = True, expand_action_callback: Callable[ [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY ] = expand_action, raise_on_exception: bool = True, max_retries: int = 0, initial_backoff: float = 2, max_backoff: float = 600, yield_ok: bool = True, ignore_status: Union[int, Collection[int]] = (), retry_on_status: Union[int, Collection[int]] = (429,), *args: Any, **kwargs: Any, ) -> AsyncIterable[Tuple[bool, Dict[str, Any]]]: """ Streaming bulk consumes actions from the iterable passed in and yields results per action. For non-streaming usecases use :func:`~elasticsearch.helpers.async_bulk` which is a wrapper around streaming bulk that returns summary information about the bulk operation once the entire input is consumed and sent. If you specify ``max_retries`` it will also retry any documents that were rejected with a ``429`` status code. Use ``retry_on_status`` to configure which status codes will be retried. To do this it will wait (**by calling asyncio.sleep which will block**) for ``initial_backoff`` seconds and then, every subsequent rejection for the same chunk, for double the time every time up to ``max_backoff`` seconds. :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use :arg actions: iterable or async iterable containing the actions to be executed :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted). :arg retry_on_status: HTTP status code that will trigger a retry. (if `None` is specified only status 429 will retry). 
:arg max_retries: maximum number of times a document will be retried when retry_on_status (defaulting to ``429``) is received, set to 0 (default) for no retries :arg initial_backoff: number of seconds we should wait before the first retry. Any subsequent retries will be powers of ``initial_backoff * 2**retry_number`` :arg max_backoff: maximum number of seconds a retry will wait :arg yield_ok: if set to False will skip successful documents in the output :arg ignore_status: list of HTTP status code that you want to ignore """ client = client.options() client._client_meta = (("h", "bp"),) if isinstance(retry_on_status, int): retry_on_status = (retry_on_status,) async def map_actions() -> AsyncIterable[_TYPE_BULK_ACTION_HEADER_AND_BODY]: async for item in aiter(actions): yield expand_action_callback(item) serializer = client.transport.serializers.get_serializer("application/json") bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ] bulk_actions: List[bytes] async for bulk_data, bulk_actions in _chunk_actions( map_actions(), chunk_size, max_chunk_bytes, serializer ): for attempt in range(max_retries + 1): to_retry: List[bytes] = [] to_retry_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ] = [] if attempt: await asyncio.sleep( min(max_backoff, initial_backoff * 2 ** (attempt - 1)) ) try: data: Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ok: bool info: Dict[str, Any] async for data, (ok, info) in azip( # type: ignore[assignment, misc] bulk_data, _process_bulk_chunk( client, bulk_actions, bulk_data, raise_on_exception, raise_on_error, ignore_status, *args, **kwargs, ), ): if not ok: action, info = info.popitem() # retry if retries enabled, we are not in the last attempt, # and status in retry_on_status (defaulting to 429) if ( max_retries and info["status"] in retry_on_status and (attempt + 1) <= max_retries ): # _process_bulk_chunk expects strings so we need to # re-serialize the data to_retry.extend(map(serializer.dumps, data)) to_retry_data.append(data) else: yield ok, {action: info} elif yield_ok: yield ok, info except ApiError as e: # suppress any status in retry_on_status (429 by default) # since we will retry them if attempt == max_retries or e.status_code not in retry_on_status: raise else: if not to_retry: break # retry only subset of documents that didn't succeed bulk_actions, bulk_data = to_retry, to_retry_data async def async_bulk( client: AsyncElasticsearch, actions: Union[Iterable[_TYPE_BULK_ACTION], AsyncIterable[_TYPE_BULK_ACTION]], stats_only: bool = False, ignore_status: Union[int, Collection[int]] = (), *args: Any, **kwargs: Any, ) -> Tuple[int, Union[int, List[Any]]]: """ Helper for the :meth:`~elasticsearch.AsyncElasticsearch.bulk` api that provides a more human friendly interface - it consumes an iterator of actions and sends them to elasticsearch in chunks. It returns a tuple with summary information - number of successfully executed actions and either list of errors or number of errors if ``stats_only`` is set to ``True``. Note that by default we raise a ``BulkIndexError`` when we encounter an error so options like ``stats_only`` only+ apply when ``raise_on_error`` is set to ``False``. When errors are being collected original document data is included in the error dictionary which can lead to an extra high memory usage. 
If you need to process a lot of data and want to ignore/collect errors please consider using the :func:`~elasticsearch.helpers.async_streaming_bulk` helper which will just return the errors and not store them in memory. :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use :arg actions: iterator containing the actions :arg stats_only: if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses :arg ignore_status: list of HTTP status code that you want to ignore Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.async_streaming_bulk` which is used to execute the operation, see :func:`~elasticsearch.helpers.async_streaming_bulk` for more accepted parameters. """ success, failed = 0, 0 # list of errors to be collected is not stats_only errors = [] # make streaming_bulk yield successful results so we can count them kwargs["yield_ok"] = True async for ok, item in async_streaming_bulk( client, actions, ignore_status=ignore_status, *args, **kwargs # type: ignore[misc] ): # go through request-response pairs and detect failures if not ok: if not stats_only: errors.append(item) failed += 1 else: success += 1 return success, failed if stats_only else errors async def async_scan( client: AsyncElasticsearch, query: Optional[Any] = None, scroll: str = "5m", raise_on_error: bool = True, preserve_order: bool = False, size: int = 1000, request_timeout: Optional[float] = None, clear_scroll: bool = True, scroll_kwargs: Optional[MutableMapping[str, Any]] = None, **kwargs: Any, ) -> AsyncIterable[Dict[str, Any]]: """ Simple abstraction on top of the :meth:`~elasticsearch.AsyncElasticsearch.scroll` api - a simple iterator that yields all hits as returned by underlining scroll requests. By default scan does not return results in any pre-determined order. To have a standard order in the returned documents (either by score or explicit sort definition) when scrolling, use ``preserve_order=True``. This may be an expensive operation and will negate the performance benefits of using ``scan``. :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use :arg query: body for the :meth:`~elasticsearch.AsyncElasticsearch.search` api :arg scroll: Specify how long a consistent view of the index should be maintained for scrolled search :arg raise_on_error: raises an exception (``ScanError``) if an error is encountered (some shards fail to execute). By default we raise. :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will cause the scroll to paginate with preserving the order. Note that this can be an extremely expensive operation and can easily lead to unpredictable results, use with caution. :arg size: size (per shard) of the batch send at each iteration. :arg request_timeout: explicit timeout for each call to ``scan`` :arg clear_scroll: explicitly calls delete on the scroll id via the clear scroll API at the end of the method on completion or error, defaults to true. :arg scroll_kwargs: additional kwargs to be passed to :meth:`~elasticsearch.AsyncElasticsearch.scroll` Any additional keyword arguments will be passed to the initial :meth:`~elasticsearch.AsyncElasticsearch.search` call: .. 
code-block:: python async_scan( client, query={"query": {"match": {"title": "python"}}}, index="orders-*" ) """ scroll_kwargs = scroll_kwargs or {} if not preserve_order: query = query.copy() if query else {} query["sort"] = "_doc" def pop_transport_kwargs(kw: MutableMapping[str, Any]) -> MutableMapping[str, Any]: # Grab options that should be propagated to every # API call within this helper instead of just 'search()' transport_kwargs = {} for key in ("headers", "api_key", "http_auth", "basic_auth", "bearer_auth"): try: value = kw.pop(key) if key == "http_auth": key = "basic_auth" transport_kwargs[key] = value except KeyError: pass return transport_kwargs client = client.options( request_timeout=request_timeout, **pop_transport_kwargs(kwargs) ) client._client_meta = (("h", "s"),) # Setting query={"from": ...} would make 'from' be used # as a keyword argument instead of 'from_'. We handle that here. def normalize_from_keyword(kw: MutableMapping[str, Any]) -> None: if "from" in kw: kw["from_"] = kw.pop("from") normalize_from_keyword(kwargs) try: search_kwargs = query.copy() if query else {} normalize_from_keyword(search_kwargs) search_kwargs.update(kwargs) search_kwargs["scroll"] = scroll search_kwargs["size"] = size resp = await client.search(**search_kwargs) # Try the old deprecated way if we fail immediately on parameters. except TypeError: search_kwargs = kwargs.copy() search_kwargs["scroll"] = scroll search_kwargs["size"] = size resp = await client.search(body=query, **search_kwargs) scroll_id: Optional[str] = resp.get("_scroll_id") scroll_transport_kwargs = pop_transport_kwargs(scroll_kwargs) if scroll_transport_kwargs: scroll_client = client.options(**scroll_transport_kwargs) else: scroll_client = client try: while scroll_id and resp["hits"]["hits"]: for hit in resp["hits"]["hits"]: yield hit # Default to 0 if the value isn't included in the response shards_info: Dict[str, int] = resp["_shards"] shards_successful = shards_info.get("successful", 0) shards_skipped = shards_info.get("skipped", 0) shards_total = shards_info.get("total", 0) # check if we have any errors if (shards_successful + shards_skipped) < shards_total: shards_message = "Scroll request has only succeeded on %d (+%d skipped) shards out of %d." logger.warning( shards_message, shards_successful, shards_skipped, shards_total, ) if raise_on_error: raise ScanError( scroll_id, shards_message % ( shards_successful, shards_skipped, shards_total, ), ) resp = await scroll_client.scroll( scroll_id=scroll_id, scroll=scroll, **scroll_kwargs ) scroll_id = resp.get("_scroll_id") finally: if scroll_id and clear_scroll: await client.options(ignore_status=404).clear_scroll(scroll_id=scroll_id) async def async_reindex( client: AsyncElasticsearch, source_index: Union[str, Collection[str]], target_index: str, query: Any = None, target_client: Optional[AsyncElasticsearch] = None, chunk_size: int = 500, scroll: str = "5m", op_type: Optional[str] = None, scan_kwargs: MutableMapping[str, Any] = {}, bulk_kwargs: MutableMapping[str, Any] = {}, ) -> Tuple[int, Union[int, List[Any]]]: """ Reindex all documents from one index that satisfy a given query to another, potentially (if `target_client` is specified) on a different cluster. If you don't specify the query you will reindex all the documents. Since ``2.3`` a :meth:`~elasticsearch.AsyncElasticsearch.reindex` api is available as part of elasticsearch itself. It is recommended to use the api instead of this helper wherever possible. 
The helper is here mostly for backwards compatibility and for situations where more flexibility is needed. .. note:: This helper doesn't transfer mappings, just the data. :arg client: instance of :class:`~elasticsearch.AsyncElasticsearch` to use (for read if `target_client` is specified as well) :arg source_index: index (or list of indices) to read documents from :arg target_index: name of the index in the target cluster to populate :arg query: body for the :meth:`~elasticsearch.AsyncElasticsearch.search` api :arg target_client: optional, is specified will be used for writing (thus enabling reindex between clusters) :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg scroll: Specify how long a consistent view of the index should be maintained for scrolled search :arg op_type: Explicit operation type. Defaults to '_index'. Data streams must be set to 'create'. If not specified, will auto-detect if target_index is a data stream. :arg scan_kwargs: additional kwargs to be passed to :func:`~elasticsearch.helpers.async_scan` :arg bulk_kwargs: additional kwargs to be passed to :func:`~elasticsearch.helpers.async_bulk` """ target_client = client if target_client is None else target_client docs = async_scan( client, query=query, index=source_index, scroll=scroll, **scan_kwargs ) async def _change_doc_index( hits: AsyncIterable[Dict[str, Any]], index: str, op_type: Optional[str], ) -> AsyncIterable[Dict[str, Any]]: async for h in hits: h["_index"] = index if op_type is not None: h["_op_type"] = op_type if "fields" in h: h.update(h.pop("fields")) yield h kwargs = {"stats_only": True} kwargs.update(bulk_kwargs) is_data_stream = False try: # Verify if the target_index is data stream or index data_streams = await target_client.indices.get_data_stream( name=target_index, expand_wildcards="all" ) is_data_stream = any( data_stream["name"] == target_index for data_stream in data_streams["data_streams"] ) except (TransportError, KeyError, NotFoundError): # If its not data stream, might be index pass if is_data_stream: if op_type not in (None, "create"): raise ValueError("Data streams must have 'op_type' set to 'create'") else: op_type = "create" return await async_bulk( target_client, _change_doc_index(docs, target_index, op_type), chunk_size=chunk_size, **kwargs, ) python-elasticsearch-9.1.1/elasticsearch/_otel.py000066400000000000000000000101071506101734100221360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from __future__ import annotations import contextlib import os from typing import Generator, Literal, Mapping try: from opentelemetry import trace _tracer: trace.Tracer | None = trace.get_tracer("elasticsearch-api") except ImportError: _tracer = None from elastic_transport import OpenTelemetrySpan # Valid values for the enabled config are 'true' and 'false'. Default is 'true'. ENABLED_ENV_VAR = "OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_ENABLED" # Describes how to handle search queries in the request body when assigned to # a span attribute. # Valid values are 'omit' and 'raw'. # Default is 'omit' as 'raw' has security implications. BODY_STRATEGY_ENV_VAR = "OTEL_PYTHON_INSTRUMENTATION_ELASTICSEARCH_CAPTURE_SEARCH_QUERY" DEFAULT_BODY_STRATEGY = "omit" class OpenTelemetry: def __init__( self, enabled: bool | None = None, tracer: trace.Tracer | None = None, body_strategy: Literal["omit", "raw"] | None = None, ): if enabled is None: enabled = os.environ.get(ENABLED_ENV_VAR, "true") == "true" self.tracer = tracer or _tracer self.enabled = enabled and self.tracer is not None if body_strategy is not None: self.body_strategy = body_strategy else: self.body_strategy = os.environ.get( BODY_STRATEGY_ENV_VAR, DEFAULT_BODY_STRATEGY ) # type: ignore[assignment] assert self.body_strategy in ("omit", "raw") @contextlib.contextmanager def span( self, method: str, *, endpoint_id: str | None, path_parts: Mapping[str, str], ) -> Generator[OpenTelemetrySpan]: if not self.enabled or self.tracer is None: yield OpenTelemetrySpan(None) return span_name = endpoint_id or method with self.tracer.start_as_current_span(span_name) as otel_span: otel_span.set_attribute("http.request.method", method) otel_span.set_attribute("db.system.name", "elasticsearch") if endpoint_id is not None: otel_span.set_attribute("db.operation.name", endpoint_id) for key, value in path_parts.items(): otel_span.set_attribute(f"db.operation.parameter.{key}", value) yield OpenTelemetrySpan( otel_span, endpoint_id=endpoint_id, body_strategy=self.body_strategy, ) @contextlib.contextmanager def helpers_span(self, span_name: str) -> Generator[OpenTelemetrySpan]: if not self.enabled or self.tracer is None: yield OpenTelemetrySpan(None) return with self.tracer.start_as_current_span(span_name) as otel_span: otel_span.set_attribute("db.system.name", "elasticsearch") otel_span.set_attribute("db.operation.name", span_name) # Without a request method, Elastic APM does not display the traces otel_span.set_attribute("http.request.method", "null") yield OpenTelemetrySpan(otel_span) @contextlib.contextmanager def use_span(self, span: OpenTelemetrySpan) -> Generator[None]: if not self.enabled or self.tracer is None or span.otel_span is None: yield return with trace.use_span(span.otel_span): yield python-elasticsearch-9.1.1/elasticsearch/_sync/000077500000000000000000000000001506101734100215765ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/_sync/__init__.py000066400000000000000000000014231506101734100237070ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/elasticsearch/_sync/client/000077500000000000000000000000001506101734100230545ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/_sync/client/__init__.py000066400000000000000000012277371506101734100252100ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import typing as t from elastic_transport import ( BaseNode, BinaryApiResponse, HeadApiResponse, NodeConfig, NodePool, NodeSelector, ObjectApiResponse, Serializer, Transport, ) from elastic_transport.client_utils import DEFAULT, DefaultType from ...exceptions import ApiError, TransportError from ...serializer import DEFAULT_SERIALIZERS from ._base import ( BaseClient, create_sniff_callback, default_sniff_callback, resolve_auth_headers, ) from .async_search import AsyncSearchClient from .autoscaling import AutoscalingClient from .cat import CatClient from .ccr import CcrClient from .cluster import ClusterClient from .connector import ConnectorClient from .dangling_indices import DanglingIndicesClient from .enrich import EnrichClient from .eql import EqlClient from .esql import EsqlClient from .features import FeaturesClient from .fleet import FleetClient from .graph import GraphClient from .ilm import IlmClient from .indices import IndicesClient from .inference import InferenceClient from .ingest import IngestClient from .license import LicenseClient from .logstash import LogstashClient from .migration import MigrationClient from .ml import MlClient from .monitoring import MonitoringClient from .nodes import NodesClient from .query_rules import QueryRulesClient from .rollup import RollupClient from .search_application import SearchApplicationClient from .searchable_snapshots import SearchableSnapshotsClient from .security import SecurityClient from .shutdown import ShutdownClient from .simulate import SimulateClient from .slm import SlmClient from .snapshot import SnapshotClient from .sql import SqlClient from .ssl import SslClient from .synonyms import SynonymsClient from .tasks import TasksClient from .text_structure import TextStructureClient from .transform import TransformClient from .utils import ( _TYPE_HOSTS, CLIENT_META_SERVICE, SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, client_node_configs, is_requests_http_auth, is_requests_node_class, ) from .watcher import WatcherClient from .xpack import XPackClient 
logger = logging.getLogger("elasticsearch") SelfType = t.TypeVar("SelfType", bound="Elasticsearch") class Elasticsearch(BaseClient): """ Elasticsearch low-level client. Provides a straightforward mapping from Python to Elasticsearch REST APIs. The client instance has additional attributes to update APIs in different namespaces such as ``async_search``, ``indices``, ``security``, and more: .. code-block:: python client = Elasticsearch("http://localhost:9200") # Get Document API client.get(index="*", id="1") # Get Index API client.indices.get(index="*") Transport options can be set on the client constructor or using the :meth:`~elasticsearch.Elasticsearch.options` method: .. code-block:: python # Set 'api_key' on the constructor client = Elasticsearch( "http://localhost:9200", api_key="api_key", ) client.search(...) # Set 'api_key' per request client.options(api_key="api_key").search(...) """ def __init__( self, hosts: t.Optional[_TYPE_HOSTS] = None, *, # API cloud_id: t.Optional[str] = None, api_key: t.Optional[t.Union[str, t.Tuple[str, str]]] = None, basic_auth: t.Optional[t.Union[str, t.Tuple[str, str]]] = None, bearer_auth: t.Optional[str] = None, opaque_id: t.Optional[str] = None, # Node headers: t.Union[DefaultType, t.Mapping[str, str]] = DEFAULT, connections_per_node: t.Union[DefaultType, int] = DEFAULT, http_compress: t.Union[DefaultType, bool] = DEFAULT, verify_certs: t.Union[DefaultType, bool] = DEFAULT, ca_certs: t.Union[DefaultType, str] = DEFAULT, client_cert: t.Union[DefaultType, str] = DEFAULT, client_key: t.Union[DefaultType, str] = DEFAULT, ssl_assert_hostname: t.Union[DefaultType, str] = DEFAULT, ssl_assert_fingerprint: t.Union[DefaultType, str] = DEFAULT, ssl_version: t.Union[DefaultType, int] = DEFAULT, ssl_context: t.Union[DefaultType, t.Any] = DEFAULT, ssl_show_warn: t.Union[DefaultType, bool] = DEFAULT, # Transport transport_class: t.Type[Transport] = Transport, request_timeout: t.Union[DefaultType, None, float] = DEFAULT, node_class: t.Union[DefaultType, t.Type[BaseNode]] = DEFAULT, node_pool_class: t.Union[DefaultType, t.Type[NodePool]] = DEFAULT, randomize_nodes_in_pool: t.Union[DefaultType, bool] = DEFAULT, node_selector_class: t.Union[DefaultType, t.Type[NodeSelector]] = DEFAULT, dead_node_backoff_factor: t.Union[DefaultType, float] = DEFAULT, max_dead_node_backoff: t.Union[DefaultType, float] = DEFAULT, serializer: t.Optional[Serializer] = None, serializers: t.Union[DefaultType, t.Mapping[str, Serializer]] = DEFAULT, default_mimetype: str = "application/json", max_retries: t.Union[DefaultType, int] = DEFAULT, retry_on_status: t.Union[DefaultType, int, t.Collection[int]] = DEFAULT, retry_on_timeout: t.Union[DefaultType, bool] = DEFAULT, sniff_on_start: t.Union[DefaultType, bool] = DEFAULT, sniff_before_requests: t.Union[DefaultType, bool] = DEFAULT, sniff_on_node_failure: t.Union[DefaultType, bool] = DEFAULT, sniff_timeout: t.Union[DefaultType, None, float] = DEFAULT, min_delay_between_sniffing: t.Union[DefaultType, None, float] = DEFAULT, sniffed_node_callback: t.Optional[ t.Callable[[t.Dict[str, t.Any], NodeConfig], t.Optional[NodeConfig]] ] = None, meta_header: t.Union[DefaultType, bool] = DEFAULT, http_auth: t.Union[DefaultType, t.Any] = DEFAULT, # Internal use only _transport: t.Optional[Transport] = None, ) -> None: if hosts is None and cloud_id is None and _transport is None: raise ValueError("Either 'hosts' or 'cloud_id' must be specified") if serializer is not None: if serializers is not DEFAULT: raise ValueError( "Can't specify both 'serializer' and 
'serializers' parameters " "together. Instead only specify one of the other." ) serializers = {default_mimetype: serializer} # Setting min_delay_between_sniffing=True implies sniff_before_requests=True if min_delay_between_sniffing is not DEFAULT: sniff_before_requests = True sniffing_options = ( sniff_timeout, sniff_on_start, sniff_before_requests, sniff_on_node_failure, sniffed_node_callback, min_delay_between_sniffing, sniffed_node_callback, ) if cloud_id is not None and any( x is not DEFAULT and x is not None for x in sniffing_options ): raise ValueError( "Sniffing should not be enabled when connecting to Elastic Cloud" ) sniff_callback = None if sniffed_node_callback is not None: sniff_callback = create_sniff_callback( sniffed_node_callback=sniffed_node_callback ) elif ( sniff_on_start is True or sniff_before_requests is True or sniff_on_node_failure is True ): sniff_callback = default_sniff_callback if _transport is None: requests_session_auth = None if http_auth is not None and http_auth is not DEFAULT: if is_requests_http_auth(http_auth): # If we're using custom requests authentication # then we need to alert the user that they also # need to use 'node_class=requests'. if not is_requests_node_class(node_class): raise ValueError( "Using a custom 'requests.auth.AuthBase' class for " "'http_auth' must be used with node_class='requests'" ) # Reset 'http_auth' to DEFAULT so it's not consumed below. requests_session_auth = http_auth http_auth = DEFAULT node_configs = client_node_configs( hosts, cloud_id=cloud_id, requests_session_auth=requests_session_auth, connections_per_node=connections_per_node, http_compress=http_compress, verify_certs=verify_certs, ca_certs=ca_certs, client_cert=client_cert, client_key=client_key, ssl_assert_hostname=ssl_assert_hostname, ssl_assert_fingerprint=ssl_assert_fingerprint, ssl_version=ssl_version, ssl_context=ssl_context, ssl_show_warn=ssl_show_warn, ) transport_kwargs: t.Dict[str, t.Any] = {} if node_class is not DEFAULT: transport_kwargs["node_class"] = node_class if node_pool_class is not DEFAULT: transport_kwargs["node_pool_class"] = node_pool_class if randomize_nodes_in_pool is not DEFAULT: transport_kwargs["randomize_nodes_in_pool"] = randomize_nodes_in_pool if node_selector_class is not DEFAULT: transport_kwargs["node_selector_class"] = node_selector_class if dead_node_backoff_factor is not DEFAULT: transport_kwargs["dead_node_backoff_factor"] = dead_node_backoff_factor if max_dead_node_backoff is not DEFAULT: transport_kwargs["max_dead_node_backoff"] = max_dead_node_backoff if meta_header is not DEFAULT: transport_kwargs["meta_header"] = meta_header transport_serializers = DEFAULT_SERIALIZERS.copy() if serializers is not DEFAULT: transport_serializers.update(serializers) # Override compatibility serializers from their non-compat mimetypes too. # So we use the same serializer for requests and responses. 
for mime_subtype in ("json", "x-ndjson"): if f"application/{mime_subtype}" in serializers: compat_mimetype = ( f"application/vnd.elasticsearch+{mime_subtype}" ) if compat_mimetype not in serializers: transport_serializers[compat_mimetype] = serializers[ f"application/{mime_subtype}" ] transport_kwargs["serializers"] = transport_serializers transport_kwargs["default_mimetype"] = default_mimetype if sniff_on_start is not DEFAULT: transport_kwargs["sniff_on_start"] = sniff_on_start if sniff_before_requests is not DEFAULT: transport_kwargs["sniff_before_requests"] = sniff_before_requests if sniff_on_node_failure is not DEFAULT: transport_kwargs["sniff_on_node_failure"] = sniff_on_node_failure if sniff_timeout is not DEFAULT: transport_kwargs["sniff_timeout"] = sniff_timeout if min_delay_between_sniffing is not DEFAULT: transport_kwargs["min_delay_between_sniffing"] = ( min_delay_between_sniffing ) _transport = transport_class( node_configs, client_meta_service=CLIENT_META_SERVICE, sniff_callback=sniff_callback, **transport_kwargs, ) super().__init__(_transport) # These are set per-request so are stored separately. self._request_timeout = request_timeout self._max_retries = max_retries self._retry_on_timeout = retry_on_timeout if isinstance(retry_on_status, int): retry_on_status = (retry_on_status,) self._retry_on_status = retry_on_status else: super().__init__(_transport) if headers is not DEFAULT and headers is not None: self._headers.update(headers) if opaque_id is not DEFAULT and opaque_id is not None: # type: ignore[comparison-overlap] self._headers["x-opaque-id"] = opaque_id self._headers = resolve_auth_headers( self._headers, http_auth=http_auth, api_key=api_key, basic_auth=basic_auth, bearer_auth=bearer_auth, ) # namespaced clients for compatibility with API names self.async_search = AsyncSearchClient(self) self.autoscaling = AutoscalingClient(self) self.cat = CatClient(self) self.cluster = ClusterClient(self) self.connector = ConnectorClient(self) self.fleet = FleetClient(self) self.features = FeaturesClient(self) self.indices = IndicesClient(self) self.inference = InferenceClient(self) self.ingest = IngestClient(self) self.nodes = NodesClient(self) self.snapshot = SnapshotClient(self) self.tasks = TasksClient(self) self.xpack = XPackClient(self) self.ccr = CcrClient(self) self.dangling_indices = DanglingIndicesClient(self) self.enrich = EnrichClient(self) self.eql = EqlClient(self) self.esql = EsqlClient(self) self.graph = GraphClient(self) self.ilm = IlmClient(self) self.license = LicenseClient(self) self.logstash = LogstashClient(self) self.migration = MigrationClient(self) self.ml = MlClient(self) self.monitoring = MonitoringClient(self) self.query_rules = QueryRulesClient(self) self.rollup = RollupClient(self) self.search_application = SearchApplicationClient(self) self.searchable_snapshots = SearchableSnapshotsClient(self) self.security = SecurityClient(self) self.slm = SlmClient(self) self.simulate = SimulateClient(self) self.shutdown = ShutdownClient(self) self.sql = SqlClient(self) self.ssl = SslClient(self) self.synonyms = SynonymsClient(self) self.text_structure = TextStructureClient(self) self.transform = TransformClient(self) self.watcher = WatcherClient(self) def __repr__(self) -> str: try: # get a list of all connections nodes = [node.base_url for node in self.transport.node_pool.all()] # truncate to 5 if there are too many if len(nodes) > 5: nodes = nodes[:5] + ["..."] return f"<{self.__class__.__name__}({nodes})>" except Exception: # probably operating on custom 
transport and connection_pool, ignore return super().__repr__() def __enter__(self) -> "Elasticsearch": try: # All this to avoid a Mypy error when using unasync. getattr(self.transport, "_async_call")() except AttributeError: pass return self def __exit__(self, *_: t.Any) -> None: self.close() def options( self: SelfType, *, opaque_id: t.Union[DefaultType, str] = DEFAULT, api_key: t.Union[DefaultType, str, t.Tuple[str, str]] = DEFAULT, basic_auth: t.Union[DefaultType, str, t.Tuple[str, str]] = DEFAULT, bearer_auth: t.Union[DefaultType, str] = DEFAULT, headers: t.Union[DefaultType, t.Mapping[str, str]] = DEFAULT, request_timeout: t.Union[DefaultType, t.Optional[float]] = DEFAULT, ignore_status: t.Union[DefaultType, int, t.Collection[int]] = DEFAULT, max_retries: t.Union[DefaultType, int] = DEFAULT, retry_on_status: t.Union[DefaultType, int, t.Collection[int]] = DEFAULT, retry_on_timeout: t.Union[DefaultType, bool] = DEFAULT, ) -> SelfType: client = type(self)(_transport=self.transport) resolved_headers = headers if headers is not DEFAULT else None resolved_headers = resolve_auth_headers( headers=resolved_headers, api_key=api_key, basic_auth=basic_auth, bearer_auth=bearer_auth, ) resolved_opaque_id = opaque_id if opaque_id is not DEFAULT else None if resolved_opaque_id: resolved_headers["x-opaque-id"] = resolved_opaque_id if resolved_headers: new_headers = self._headers.copy() new_headers.update(resolved_headers) client._headers = new_headers else: client._headers = self._headers.copy() if request_timeout is not DEFAULT: client._request_timeout = request_timeout else: client._request_timeout = self._request_timeout if ignore_status is not DEFAULT: if isinstance(ignore_status, int): ignore_status = (ignore_status,) client._ignore_status = ignore_status else: client._ignore_status = self._ignore_status if max_retries is not DEFAULT: if not isinstance(max_retries, int): raise TypeError("'max_retries' must be of type 'int'") client._max_retries = max_retries else: client._max_retries = self._max_retries if retry_on_status is not DEFAULT: if isinstance(retry_on_status, int): retry_on_status = (retry_on_status,) client._retry_on_status = retry_on_status else: client._retry_on_status = self._retry_on_status if retry_on_timeout is not DEFAULT: if not isinstance(retry_on_timeout, bool): raise TypeError("'retry_on_timeout' must be of type 'bool'") client._retry_on_timeout = retry_on_timeout else: client._retry_on_timeout = self._retry_on_timeout return client def close(self) -> None: """Closes the Transport and all internal connections""" self.transport.close() @_rewrite_parameters() def ping( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[t.List[str], str]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> bool: """ Returns True if a successful response returns from the info() API, otherwise returns False. This API call can fail either at the transport layer (due to connection errors or timeouts) or from a non-2XX HTTP response (due to authentication or authorization issues). If you want to discover why the request failed you should use the ``info()`` API. 
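A typical liveness check, as a minimal sketch:

    if not client.ping():
        print("Elasticsearch is unreachable or rejected the request")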
``_ """ __path = "/" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} try: self.perform_request("HEAD", __path, params=__query, headers=__headers) return True except (ApiError, TransportError): return False # AUTO-GENERATED-API-DEFINITIONS # @_rewrite_parameters( body_name="operations", parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def bulk( self, *, operations: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_source_on_error: t.Optional[bool] = None, list_executed_pipelines: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk index or delete documents. Perform multiple index, create, delete, and update actions in a single request. This reduces overhead and can greatly increase indexing speed.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

  • To use the create action, you must have the create_doc, create, index, or write index privilege. Data streams support only the create action.
  • To use the index action, you must have the create, index, or write index privilege.
  • To use the delete action, you must have the delete or write index privilege.
  • To use the update action, you must have the index or write index privilege.
  • To automatically create a data stream or index with a bulk API request, you must have the auto_configure, create_index, or manage index privilege.
  • To make the result of a bulk operation visible to search using the refresh parameter, you must have the maintenance or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

The actions are specified in the request body using a newline delimited JSON (NDJSON) structure:

action_and_meta_data\\n
          optional_source\\n
          action_and_meta_data\\n
          optional_source\\n
          ....
          action_and_meta_data\\n
          optional_source\\n
          

The index and create actions expect a source on the next line and have the same semantics as the op_type parameter in the standard index API. A create action fails if a document with the same ID already exists in the target. An index action adds or replaces a document as necessary.

NOTE: Data streams support only the create action. To update or delete a document in a data stream, you must target the backing index containing the document.

An update action expects that the partial doc, upsert, and script and its options are specified on the next line.

A delete action does not expect a source on the next line and has the same semantics as the standard delete API.

NOTE: The final line of data must end with a newline character (\\n). Each newline character may be preceded by a carriage return (\\r). When sending NDJSON data to the _bulk endpoint, use a Content-Type header of application/json or application/x-ndjson. Because this format uses literal newline characters (\\n) as delimiters, make sure that the JSON actions and sources are not pretty printed.
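
As a minimal sketch of assembling such a body in Python (the index name and documents are hypothetical):

    import json

    docs = [{"field1": "value1"}, {"field1": "value2"}]
    lines = []
    for i, doc in enumerate(docs, start=1):
        # one action line, then the optional source line
        lines.append(json.dumps({"index": {"_index": "my-index", "_id": str(i)}}))
        lines.append(json.dumps(doc))
    body = "\n".join(lines) + "\n"  # the trailing newline is required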

If you provide a target in the request path, it is used for any actions that don't explicitly specify an _index argument.

A note on the format: the idea here is to make processing as fast as possible. As some of the actions are redirected to other shards on other nodes, only action_meta_data is parsed on the receiving node side.

Client libraries using this protocol should strive to do something similar on the client side, and reduce buffering as much as possible.

There is no "correct" number of actions to perform in a single bulk request. Experiment with different settings to find the optimal size for your particular workload. Note that Elasticsearch limits the maximum size of a HTTP request to 100mb by default so clients must ensure that no request exceeds this size. It is not possible to index a single document that exceeds the size limit, so you must pre-process any such documents into smaller pieces before sending them to Elasticsearch. For instance, split documents into pages or chapters before indexing them, or store raw binary data in a system outside Elasticsearch and replace the raw data with a link to the external system in the documents that you send to Elasticsearch.

Client support for bulk requests

Some of the officially supported clients provide helpers to assist with bulk requests and reindexing:

  • Go: Check out esutil.BulkIndexer
  • Perl: Check out Search::Elasticsearch::Client::5_0::Bulk and Search::Elasticsearch::Client::5_0::Scroll
  • Python: Check out elasticsearch.helpers.* (see the sketch after this list)
  • JavaScript: Check out client.helpers.*
  • .NET: Check out BulkAllObservable
  • PHP: Check out bulk indexing.
  • Ruby: Check out Elasticsearch::Helpers::BulkHelper

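For example, with this client the bulk helper consumes an iterable of actions and handles chunking and NDJSON serialization for you; a minimal sketch (the index name and documents are hypothetical):

    from elasticsearch import Elasticsearch, helpers

    client = Elasticsearch("http://localhost:9200")
    actions = (
        {"_index": "my-index", "_source": {"field1": f"value{i}"}}
        for i in range(100)
    )
    success, errors = helpers.bulk(client, actions)
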
Submitting bulk requests with cURL

If you're providing text file input to curl, you must use the --data-binary flag instead of plain -d. The latter doesn't preserve newlines. For example:

$ cat requests
          { "index" : { "_index" : "test", "_id" : "1" } }
          { "field1" : "value1" }
          $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
          {"took":7, "errors": false, "items":[{"index":{"_index":"test","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
          

Optimistic concurrency control

Each index and delete action within a bulk API call may include the if_seq_no and if_primary_term parameters in their respective action and meta data lines. The if_seq_no and if_primary_term parameters control how operations are run, based on the last modification to existing documents. See Optimistic concurrency control for more details.
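
For instance, an index action that is applied only if the document has not changed since it was last read might look like this (the sequence number and primary term values are illustrative):

    { "index" : { "_index" : "my-index", "_id" : "1", "if_seq_no" : 5, "if_primary_term" : 1 } }
    { "field1" : "updated value" }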

Versioning

Each bulk item can include the version value using the version field. It automatically follows the behavior of the index or delete operation based on the _version mapping. It also supports the version_type.
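
As an illustration, an action line that carries an externally maintained version might look like this (the values are hypothetical):

    { "index" : { "_index" : "my-index", "_id" : "1", "version" : 7, "version_type" : "external" } }
    { "field1" : "value1" }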

Routing

Each bulk item can include the routing value using the routing field. It automatically follows the behavior of the index or delete operation based on the _routing mapping.

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.
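
For example, an action line with a custom routing value might look like this (the routing value is illustrative):

    { "index" : { "_index" : "my-index", "_id" : "1", "routing" : "user-123" } }
    { "field1" : "value1" }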

Wait for active shards

When making bulk calls, you can set the wait_for_active_shards parameter to require a minimum number of shard copies to be active before starting to process the bulk request.
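
With this client that parameter is passed as a keyword argument; a minimal sketch (the shard-copy count is illustrative):

    client.bulk(
        operations=[
            {"index": {"_index": "my-index", "_id": "1"}},
            {"field1": "value1"},
        ],
        wait_for_active_shards=2,
    )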

Refresh

Control when the changes made by this request are visible to search.

NOTE: Only the shards that receive the bulk request will be affected by refresh. Imagine a _bulk?refresh=wait_for request with three documents in it that happen to be routed to different shards in an index with five shards. The request will only wait for those three shards to refresh. The other two shards that make up the index do not participate in the _bulk request at all.

You might want to disable the refresh interval temporarily to improve indexing throughput for large bulk requests. Refer to the linked documentation for step-by-step instructions using the index settings API.
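
For example, to have the call return only once the indexed documents are visible to search, a minimal sketch:

    client.bulk(
        operations=[
            {"index": {"_index": "my-index", "_id": "1"}},
            {"field1": "value1"},
        ],
        refresh="wait_for",
    )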

``_ :param operations: :param index: The name of the data stream, index, or index alias to perform bulk actions on. :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. :param list_executed_pipelines: If `true`, the response will include the ingest pipelines that were run for each index or create. :param pipeline: The pipeline identifier to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, wait for a refresh to make this operation visible to search. If `false`, do nothing with refreshes. Valid values: `true`, `false`, `wait_for`. :param require_alias: If `true`, the request's actions must target an index alias. :param require_data_stream: If `true`, the request's actions must target a data stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or contains a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param timeout: The period each action waits for the following operations: automatic index creation, dynamic mapping updates, and waiting for active shards. The default is `1m` (one minute), which guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default is `1`, which waits for each primary shard to be active. """ if operations is None and body is None: raise ValueError( "Empty value passed for parameters 'operations' and 'body', one of them should be set." 
) elif operations is not None and body is not None: raise ValueError("Cannot set both 'operations' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_bulk' else: __path_parts = {} __path = "/_bulk" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error if list_executed_pipelines is not None: __query["list_executed_pipelines"] = list_executed_pipelines if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias if require_data_stream is not None: __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __body = operations if operations is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="bulk", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("scroll_id",), ) def clear_scroll( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, scroll_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear a scrolling search. Clear the search context and results for a scrolling search.

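A minimal usage sketch (the scroll ID is whatever a prior scrolling search returned; the index name is hypothetical):

    resp = client.search(index="my-index", scroll="2m", size=100)
    scroll_id = resp["_scroll_id"]
    # ... page through results with client.scroll(scroll_id=scroll_id, scroll="2m") ...
    client.clear_scroll(scroll_id=scroll_id)
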
``_ :param scroll_id: The scroll IDs to clear. To clear all scroll IDs, use `_all`. """ __path_parts: t.Dict[str, str] = {} __path = "/_search/scroll" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if scroll_id is not None: __body["scroll_id"] = scroll_id if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="clear_scroll", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("id",), ) def close_point_in_time( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Close a point in time. A point in time must be opened explicitly before being used in search requests. The keep_alive parameter tells Elasticsearch how long it should persist. A point in time is automatically closed when the keep_alive period has elapsed. However, keeping points in time has a cost; close them as soon as they are no longer required for search requests.

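A minimal usage sketch (the index name and keep-alive are illustrative):

    pit = client.open_point_in_time(index="my-index", keep_alive="1m")
    # ... use pit["id"] in search requests ...
    client.close_point_in_time(id=pit["id"])
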
``_ :param id: The ID of the point-in-time. """ if id is None and body is None: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {} __path = "/_pit" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if id is not None: __body["id"] = id if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="close_point_in_time", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query",), ) def count( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, lenient: t.Optional[bool] = None, min_score: t.Optional[float] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, terminate_after: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Count search results. Get the number of documents matching a query.

The query can be provided either by using a simple query string as a parameter, or by defining Query DSL within the request body. The query is optional. When no query is provided, the API uses match_all to count all the documents.

The count API supports multi-target syntax. You can run a single count API search across multiple data streams and indices.

The operation is broadcast across all shards. For each shard ID group, a replica is chosen and the search is run against it. This means that replicas increase the scalability of the count.

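Both styles, as a minimal sketch (the index name and query are hypothetical):

    # count everything in the target
    client.count(index="my-index")

    # count documents matching a Query DSL query
    client.count(index="my-index", query={"term": {"user.id": "kimchy"}})
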
``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_throttled: If `true`, concrete, expanded, or aliased indices are ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param min_score: The minimum `_score` value that documents must have to be included in the result. :param preference: The node or shard the operation should be performed on. By default, it is random. :param q: The query in Lucene query string syntax. This parameter cannot be used with a request body. :param query: Defines the search query using Query DSL. A request body query cannot be used with the `q` query string parameter. :param routing: A custom value used to route operations to a specific shard. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_count' else: __path_parts = {} __path = "/_count" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if min_score is not None: __query["min_score"] = min_score if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if routing is not None: __query["routing"] = routing if terminate_after is not None: __query["terminate_after"] = terminate_after if not __body: if query is not None: __body["query"] = query if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="count", path_parts=__path_parts, ) @_rewrite_parameters( body_name="document", ) def create( self, *, index: str, id: str, document: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_source_on_error: t.Optional[bool] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a new document in the index.

You can index a new JSON document with the /<target>/_doc/ or /<target>/_create/<_id> APIs. Using _create guarantees that the document is indexed only if it does not already exist. It returns a 409 response when a document with the same ID already exists in the index. To update an existing document, you must use the /<target>/_doc/ API.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

  • To add a document using the PUT /<target>/_create/<_id> or POST /<target>/_create/<_id> request formats, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

Automatically create data streams and indices

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.

Automatic index creation is controlled by the action.auto_create_index setting. If it is true, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns, or set it to false to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with + or - to indicate whether it should be allowed or blocked. When a list is specified, the default behavior is to disallow.

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. It does not affect the creation of data streams.

Routing

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

Distributed

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

Active shards

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. To alter this behavior per operation, use the wait_for_active_shards request parameter.

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.

For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.
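
A minimal usage sketch from the Python client (illustrative only: assumes `es = Elasticsearch(...)` and that the target index or a matching data stream template exists):

    # Index a document only if ID "1" does not already exist; if a document
    # with the same ID exists, this client raises ConflictError (HTTP 409).
    resp = es.create(
        index="my-index-000001",
        id="1",
        document={"user": {"id": "kimchy"}, "message": "trying out create"},
    )
    print(resp["result"])  # "created"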

``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn’t match a data stream template, this request creates the index. :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format. :param document: :param include_source_on_error: True or false if to include the document source in the error message in case of parsing errors. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, setting the value to `_none` turns off the default ingest pipeline for this request. If a final pipeline is configured, it will always run regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. :param require_data_stream: If `true`, the request's actions must target a data stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param timeout: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. Elasticsearch waits for at least the specified timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. :param version: The explicit version number for concurrency control. It must be a non-negative long number. :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if document is None and body is None: raise ValueError( "Empty value passed for parameters 'document' and 'body', one of them should be set." 
) elif document is not None and body is not None: raise ValueError("Cannot set both 'document' and 'body'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_create/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias if require_data_stream is not None: __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __body = document if document is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="create", path_parts=__path_parts, ) @_rewrite_parameters() def delete( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a document.

Remove a JSON document from the specified index.

NOTE: You cannot send deletion requests directly to a data stream. To delete a document in a data stream, you must target the backing index containing the document.

Optimistic concurrency control

Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.

Versioning

Each document indexed is versioned. When deleting a document, the version can be specified to make sure the relevant document you are trying to delete is actually being deleted and it has not changed in the meantime. Every write operation run on a document, deletes included, causes its version to be incremented. The version number of a deleted document remains available for a short time after deletion to allow for control of concurrent operations. The length of time for which a deleted document's version remains available is determined by the index.gc_deletes index setting.

Routing

If routing is used during indexing, the routing value also needs to be specified to delete a document.

If the _routing mapping is set to required and no routing value is specified, the delete API throws a RoutingMissingException and rejects the request.

For example:

DELETE /my-index-000001/_doc/1?routing=shard-1
          

This request deletes the document with ID 1, but it is routed based on the user. The document is not deleted if the correct routing is not specified.

Distributed

The delete operation gets hashed into a specific shard ID. It then gets redirected into the primary shard within that ID group and replicated (if needed) to shard replicas within that ID group.
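
A minimal usage sketch from the Python client (illustrative only: assumes `es = Elasticsearch(...)`; a missing document raises NotFoundError in this client):

    resp = es.delete(
        index="my-index-000001",
        id="1",
        routing="shard-1",  # required only if routing was used at index time
    )
    print(resp["result"])  # "deleted"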

``_ :param index: The name of the target index. :param id: A unique identifier for the document. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. :param routing: A custom value used to route operations to a specific shard. :param timeout: The period to wait for active shards. This parameter is useful for situations where the primary shard assigned to perform the delete operation might not be available when the delete operation runs. Some reasons for this might be that the primary shard is currently recovering from a store or undergoing relocation. By default, the delete operation will wait on the primary shard to become available for up to 1 minute before failing and responding with an error. :param version: An explicit version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. :param wait_for_active_shards: The minimum number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="delete", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("max_docs", "query", "slice", "sort"), parameter_aliases={"from": "from_"}, ) def delete_by_query( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", 
"none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, lenient: t.Optional[bool] = None, max_docs: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, routing: t.Optional[str] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, scroll_size: t.Optional[int] = None, search_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, stats: t.Optional[t.Sequence[str]] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[bool] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete documents.

Deletes documents that match the specified query.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

  • read
  • delete or write

You can specify the query criteria in the request URI or the request body using the same syntax as the search API. When you submit a delete by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and deletes matching documents using internal versioning. If a document changes between the time that the snapshot is taken and the delete operation is processed, it results in a version conflict and the delete operation fails.

NOTE: Documents with a version equal to 0 cannot be deleted using delete by query because internal versioning does not support 0 as a valid version number.

While processing a delete by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents to delete. A bulk delete request is performed for each batch of matching documents. If a search or bulk request is rejected, the requests are retried up to 10 times, with exponential backoff. If the maximum retry limit is reached, processing halts and all failed requests are returned in the response. Any delete requests that completed successfully still stick; they are not rolled back.

You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. Note that if you opt to count version conflicts, the operation could attempt to delete more documents from the source than max_docs, until it has either successfully deleted max_docs documents or gone through every document in the source query.

Throttling delete requests

To control the rate at which delete by query issues batches of delete operations, you can set requests_per_second to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set requests_per_second to -1 to disable throttling.

Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. By default the batch size is 1000, so if requests_per_second is set to 500:

target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
          

Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".

Slicing

Delete by query supports sliced scroll to parallelize the delete process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.

Setting slices to auto lets Elasticsearch choose the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards. Adding slices to the delete by query operation creates sub-requests which means it has some quirks:

  • You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains the status of completed slices.
  • These sub-requests are individually addressable for things like cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each sub-request.
  • Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the earlier point about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being deleted.
  • Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.

If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:

  • Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
  • Delete performance scales linearly across available resources with the number of slices.

Whether query or delete performance dominates the runtime depends on the documents being deleted and on the available cluster resources.

Cancel a delete by query operation

Any delete by query can be canceled using the task cancel API. For example:

POST _tasks/r1A2WoRbTwKZ516z6NEs5A:36619/_cancel
          

The task ID can be found by using the get tasks API.

Cancellation should happen quickly but might take a few seconds. The get task status API will continue to list the delete by query task until this task checks that it has been cancelled and terminates itself.
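
A minimal usage sketch from the Python client (illustrative only: assumes `es = Elasticsearch(...)`):

    # Delete matching documents, counting version conflicts instead of aborting.
    resp = es.delete_by_query(
        index="my-index-000001",
        query={"match": {"user.id": "elkbee"}},
        conflicts="proceed",
        slices="auto",  # let Elasticsearch pick the number of slices
    )
    print(resp["deleted"], resp["version_conflicts"])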

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: Analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param conflicts: What to do if delete by query hits version conflicts: `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to delete. :param preference: The node or shard the operation should be performed on. It is random by default. :param q: A query in the Lucene query string syntax. :param query: The documents to delete specified with Query DSL. :param refresh: If `true`, Elasticsearch refreshes all shards involved in the delete by query after the request completes. This is different than the delete API's `refresh` parameter, which causes just the shard that received the delete request to be refreshed. Unlike the delete API, it does not support `wait_for`. :param request_cache: If `true`, the request cache is used for this request. Defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. :param routing: A custom value used to route operations to a specific shard. :param scroll: The period to retain the search context for scrolling. :param scroll_size: The size of the scroll request that powers the operation. :param search_timeout: The explicit timeout for each search request. It defaults to no timeout. :param search_type: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. :param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. :param sort: A sort object that specifies the order of deleted documents. :param stats: The specific `tag` of the request for logging and statistical purposes. 
:param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. :param timeout: The period each deletion request waits for active shards. :param version: If `true`, returns the document version as part of a hit. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` value controls how long each write request waits for unavailable shards to become available. :param wait_for_completion: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_delete_by_query' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if conflicts is not None: __query["conflicts"] = conflicts if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: __query["request_cache"] = request_cache if requests_per_second is not None: __query["requests_per_second"] = requests_per_second if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if scroll_size is not None: __query["scroll_size"] = scroll_size if search_timeout is not None: __query["search_timeout"] = search_timeout if search_type is not None: __query["search_type"] = search_type if slices is not None: __query["slices"] = slices if stats is 
not None: __query["stats"] = stats if terminate_after is not None: __query["terminate_after"] = terminate_after if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if max_docs is not None: __body["max_docs"] = max_docs if query is not None: __body["query"] = query if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="delete_by_query", path_parts=__path_parts, ) @_rewrite_parameters() def delete_by_query_rethrottle( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Throttle a delete by query operation.

Change the number of requests per second for a particular delete by query operation. Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
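
A minimal usage sketch from the Python client (illustrative only: the task ID is the example value used elsewhere in these docs):

    # Remove the throttle from a running delete-by-query task.
    es.delete_by_query_rethrottle(
        task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
        requests_per_second=-1,
    )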

``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per second. To disable throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_delete_by_query/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if requests_per_second is not None: __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="delete_by_query_rethrottle", path_parts=__path_parts, ) @_rewrite_parameters() def delete_script( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a script or search template. Deletes a stored script or search template.
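
A minimal usage sketch from the Python client (illustrative only: assumes a stored script or search template named "my-search-template" exists):

    es.delete_script(id="my-search-template")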

``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_scripts/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="delete_script", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def exists( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, ) -> HeadApiResponse: """ .. raw:: html

Check a document.

Verify that a document exists. For example, check to see if a document with the _id 0 exists:

HEAD my-index-000001/_doc/0
          

If the document exists, the API returns a status code of 200 - OK. If the document doesn’t exist, the API returns 404 - Not Found.

Versioning support

You can use the version parameter to check the document only if its current version is equal to the specified one.

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
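
A minimal usage sketch from the Python client (illustrative only: assumes `es = Elasticsearch(...)`; the HEAD response evaluates to a boolean):

    if es.exists(index="my-index-000001", id="0"):
        print("document exists")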

``_ :param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). :param id: A unique document identifier. :param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). :param routing: A custom value used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. :param version: Explicit version number for concurrency control. The specified version must match the current version of the document for the request to succeed. :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="exists", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def exists_source( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, ) -> HeadApiResponse: """ .. raw:: html

Check for a document source.

Check whether a document source exists in an index. For example:

HEAD my-index-000001/_source/1
          

A document's source is not available if it is disabled in the mapping.
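
A minimal usage sketch from the Python client (illustrative only):

    # True only if the document exists and its _source is stored.
    if es.exists_source(index="my-index-000001", id="1"):
        print("_source is available for this document")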

``_ :param index: A comma-separated list of data streams, indices, and aliases. It supports wildcards (`*`). :param id: A unique identifier for the document. :param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). :param routing: A custom value used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_source/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="exists_source", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query",), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def explain( self, *, index: str, id: str, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, lenient: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, body: 
t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain a document match result. Get information about why a specific document matches, or doesn't match, a query. It computes a score explanation for a query and a specific document.
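
A minimal usage sketch from the Python client (illustrative only: assumes `es = Elasticsearch(...)`):

    resp = es.explain(
        index="my-index-000001",
        id="0",
        query={"match": {"message": "elasticsearch"}},
    )
    # The response reports whether the document matched and a score breakdown.
    print(resp["matched"], resp["explanation"]["value"])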

``_ :param index: Index names that are used to limit the request. Only a single index name can be provided to this parameter. :param id: The document identifier. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param preference: The node or shard the operation should be performed on. It is random by default. :param q: The query in the Lucene query string syntax. :param query: Defines the search definition using the Query DSL. :param routing: A custom value used to route operations to a specific shard. :param source: `True` or `false` to return the `_source` field or not or a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: A comma-separated list of stored fields to return in the response. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_explain/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if lenient is not None: __query["lenient"] = lenient if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if not __body: if query is not None: __body["query"] = query if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="explain", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("fields", "index_filter", "runtime_mappings"), ) def field_caps( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, filters: t.Optional[str] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_empty_fields: t.Optional[bool] = None, include_unmapped: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, types: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the field capabilities.

Get information about the capabilities of fields among multiple indices.

For data streams, the API returns field capabilities among the stream’s backing indices. It returns runtime fields like any other field. For example, a runtime field with a type of keyword is returned the same as any other field that belongs to the keyword family.
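
A minimal usage sketch from the Python client (illustrative field patterns):

    resp = es.field_caps(index="my-index-000001", fields="rating,title*")
    for field, caps in resp["fields"].items():
        print(field, list(caps))  # e.g. "rating" -> ["long", "keyword"]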

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (*). To target all data streams and indices, omit this parameter or use * or _all. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with bar. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fields: A list of fields to retrieve capabilities for. Wildcard (`*`) expressions are supported. :param filters: A comma-separated list of filters to apply to the response. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param include_empty_fields: If false, empty fields are not included in the response. :param include_unmapped: If true, unmapped fields are included in the response. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. IMPORTANT: The filtering is done on a best-effort basis, it uses index statistics and mappings to rewrite queries to `match_none` instead of fully running the request. For instance a range query over a date field can rewrite to `match_none` if all documents within a shard (including deleted documents) are outside of the provided range. However, not all queries can rewrite to `match_none` so this API may return an index even if the provided filter matches no document. :param runtime_mappings: Define ad-hoc runtime fields in the request similar to the way it is done in search requests. These fields exist only as part of the query and take precedence over fields defined with the same name in the index mappings. :param types: A comma-separated list of field types to include. Any fields that do not match one of these types will be excluded from the results. It defaults to empty, meaning that all field types are returned. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_field_caps' else: __path_parts = {} __path = "/_field_caps" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if filters is not None: __query["filters"] = filters if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_empty_fields is not None: __query["include_empty_fields"] = include_empty_fields if include_unmapped is not None: __query["include_unmapped"] = include_unmapped if pretty is not None: __query["pretty"] = pretty if types is not None: __query["types"] = types if not __body: if fields is not None: __body["fields"] = fields if index_filter is not None: __body["index_filter"] = index_filter if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="field_caps", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def get( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force_synthetic_source: t.Optional[bool] = None, human: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a document by its ID.

Get a document and its source or stored fields from an index.

By default, this API is realtime and is not affected by the refresh rate of the index (when data will become visible for search). In the case where stored fields are requested with the stored_fields parameter and the document has been updated but is not yet refreshed, the API will have to parse and analyze the source to extract the stored fields. To turn off realtime behavior, set the realtime parameter to false.

Source filtering

By default, the API returns the contents of the _source field unless you have used the stored_fields parameter or the _source field is turned off. You can turn off _source retrieval by using the _source parameter:

GET my-index-000001/_doc/0?_source=false
          

If you only need one or two fields from the _source, use the _source_includes or _source_excludes parameters to include or filter out particular fields. This can be helpful with large documents where partial retrieval can save on network overhead. Both parameters take a comma-separated list of fields or wildcard expressions. For example:

GET my-index-000001/_doc/0?_source_includes=*.id&_source_excludes=entities
          

If you only want to specify includes, you can use a shorter notation:

GET my-index-000001/_doc/0?_source=*.id
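
Through this client, the same filtering can be expressed with the aliased source parameters; a rough sketch (index name and ID are illustrative, with `client` an `Elasticsearch` instance as sketched earlier):

          # Equivalent of ?_source_includes=*.id&_source_excludes=entities
          resp = client.get(
              index="my-index-000001",
              id="0",
              source_includes="*.id",
              source_excludes="entities",
          )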
          

Routing

If routing is used during indexing, the routing value also needs to be specified to retrieve a document. For example:

GET my-index-000001/_doc/2?routing=user1
          

This request gets the document with ID 2, but it is routed based on the user. The document is not fetched if the correct routing is not specified.
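
The same routed request through this client might look like the following sketch:

          # Equivalent of GET my-index-000001/_doc/2?routing=user1
          resp = client.get(index="my-index-000001", id="2", routing="user1")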

Distributed

The GET operation is hashed into a specific shard ID. It is then redirected to one of the replicas within that shard ID and returns the result. The replicas are the primary shard and its replicas within that shard ID group. This means that the more replicas you have, the better your GET scaling will be.

Versioning support

You can use the version parameter to retrieve the document only if its current version is equal to the specified one.

Internally, Elasticsearch has marked the old document as deleted and added an entirely new document. The old version of the document doesn't disappear immediately, although you won't be able to access it. Elasticsearch cleans up deleted documents in the background as you continue to index more data.
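
As a sketch, a version-checked fetch through this client could look like this (the version value is illustrative):

          # Succeeds only if the document's current version is 1
          resp = client.get(index="my-index-000001", id="1", version=1)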

``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. :param force_synthetic_source: Indicates whether the request forces synthetic `_source`. Use this parameter to test if the mapping supports synthetic `_source` and to get a sense of the worst case performance. Fetches with this parameter enabled will be slower than enabling synthetic source natively in the index. :param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. If it is set to `_local`, the operation will prefer to be run on a local allocated shard when possible. If it is set to a custom value, the value is used to guarantee that the same shards will be used for the same custom value. This can help with "jumping values" when hitting different shards in different refresh states. A sample value can be something like the web session ID or the user name. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). :param routing: A custom value used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` parameter defaults to `false`. Only leaf fields can be retrieved with the `stored_fields` option. Object fields can't be returned; if specified, the request fails. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force_synthetic_source is not None: __query["force_synthetic_source"] = force_synthetic_source if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get", path_parts=__path_parts, ) @_rewrite_parameters() def get_script( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a script or search template. Retrieves a stored script or search template.
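
A minimal sketch of fetching a stored entry through this client (the identifier is illustrative, with `client` an `Elasticsearch` instance):

          resp = client.get_script(id="my-search-template")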

``_ :param id: The identifier for the stored script or search template. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_scripts/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_script", path_parts=__path_parts, ) @_rewrite_parameters() def get_script_context( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get script contexts.

Get a list of supported script contexts and their methods.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_context" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_script_context", path_parts=__path_parts, ) @_rewrite_parameters() def get_script_languages( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get script languages.

Get a list of available script types, languages, and contexts.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_script_language" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_script_languages", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def get_source( self, *, index: str, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a document's source.

Get the source of a document. For example:

GET my-index-000001/_source/1
          

You can use the source filtering parameters to control which parts of the _source are returned:

GET my-index-000001/_source/1/?_source_includes=*.id&_source_excludes=entities
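
The equivalent calls through this client might look like the following sketch (names are illustrative):

          # Equivalent of GET my-index-000001/_source/1 with source filtering
          resp = client.get_source(
              index="my-index-000001",
              id="1",
              source_includes="*.id",
              source_excludes="entities",
          )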
          
``_ :param index: The name of the index that contains the document. :param id: A unique document identifier. :param preference: The node or shard the operation should be performed on. By default, the operation is randomized between the shard replicas. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes the relevant shards before retrieving the document. Setting it to `true` should be done after careful thought and verification that this does not cause a heavy load on the system (and slow down indexing). :param routing: A custom value used to route operations to a specific shard. :param source: Indicates whether to return the `_source` field (`true` or `false`) or lists the fields to return. :param source_excludes: A comma-separated list of source fields to exclude in the response. :param source_includes: A comma-separated list of source fields to include in the response. :param version: The version number for concurrency control. It must match the current version of the document for the request to succeed. :param version_type: The version type. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_source/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="get_source", path_parts=__path_parts, ) @_rewrite_parameters() def health_report( self, *, feature: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the cluster health. Get a report with the health status of an Elasticsearch cluster. The report contains a list of indicators that compose Elasticsearch functionality.

Each indicator has a health status of green, unknown, yellow, or red. The indicator will provide an explanation and metadata describing the reason for its current health status.

The cluster’s status is controlled by the worst indicator status.

In the event that an indicator’s status is non-green, a list of impacts may be present in the indicator result which detail the functionalities that are negatively affected by the health issue. Each impact carries with it a severity level, an area of the system that is affected, and a simple description of the impact on the system.

Some health indicators can determine the root cause of a health problem and prescribe a set of steps that can be performed in order to improve the health of the system. The root cause and remediation steps are encapsulated in a diagnosis. A diagnosis contains a cause detailing a root cause analysis, an action containing a brief description of the steps to take to fix the problem, the list of affected resources (if applicable), and a detailed step-by-step troubleshooting guide to fix the diagnosed problem.

NOTE: The health indicators perform root cause analysis of non-green health statuses. This can be computationally expensive when called frequently. When setting up automated polling of the API for health status, set verbose to false to disable the more expensive analysis logic.
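
A sketch of a polling-friendly call that follows this advice (assuming `client` is an `Elasticsearch` instance):

          # Skip the expensive root cause analysis when polling frequently
          resp = client.health_report(verbose=False)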

``_ :param feature: A feature of the cluster, as returned by the top-level health report API. :param size: Limit the number of affected resources the health report API returns. :param timeout: Explicit operation timeout. :param verbose: Opt-in for more information about the health of the system. """ __path_parts: t.Dict[str, str] if feature not in SKIP_IN_PATH: __path_parts = {"feature": _quote(feature)} __path = f'/_health_report/{__path_parts["feature"]}' else: __path_parts = {} __path = "/_health_report" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if timeout is not None: __query["timeout"] = timeout if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="health_report", path_parts=__path_parts, ) @_rewrite_parameters( body_name="document", ) def index( self, *, index: str, document: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, op_type: t.Optional[t.Union[str, t.Literal["create", "index"]]] = None, pipeline: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, require_data_stream: t.Optional[bool] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a document in an index.

Add a JSON document to the specified data stream or index and make it searchable. If the target is an index and the document already exists, the request updates the document and increments its version.

NOTE: You cannot use this API to send update requests for existing documents in a data stream.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or index alias:

  • To add or overwrite a document using the PUT /<target>/_doc/<_id> request format, you must have the create, index, or write index privilege.
  • To add a document using the POST /<target>/_doc/ request format, you must have the create_doc, create, index, or write index privilege.
  • To automatically create a data stream or index with this API request, you must have the auto_configure, create_index, or manage index privilege.

Automatic data stream creation requires a matching index template with data stream enabled.

NOTE: Replica shards might not all be started when an indexing operation returns successfully. By default, only the primary is required. Set wait_for_active_shards to change this default behavior.

Automatically create data streams and indices

If the request's target doesn't exist and matches an index template with a data_stream definition, the index operation automatically creates the data stream.

If the target doesn't exist and doesn't match a data stream template, the operation automatically creates the index and applies any matching index templates.

NOTE: Elasticsearch includes several built-in index templates. To avoid naming collisions with these templates, refer to index pattern documentation.

If no mapping exists, the index operation creates a dynamic mapping. By default, new fields and objects are automatically added to the mapping if needed.

Automatic index creation is controlled by the action.auto_create_index setting. If it is true, any index can be created automatically. You can modify this setting to explicitly allow or block automatic creation of indices that match specified patterns, or set it to false to turn off automatic index creation entirely. Specify a comma-separated list of patterns you want to allow, or prefix each pattern with + or - to indicate whether it should be allowed or blocked. When a list is specified, the default behavior is to disallow.

NOTE: The action.auto_create_index setting affects the automatic creation of indices only. It does not affect the creation of data streams.

Optimistic concurrency control

Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the if_seq_no and if_primary_term parameters. If a mismatch is detected, the operation will result in a VersionConflictException and a status code of 409.
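
A conditional write through this client might be sketched as follows (the sequence number and primary term are illustrative values taken from a previous read of the document):

          # Fails with a 409 if the document changed since it was last read
          resp = client.index(
              index="my-index-000001",
              id="1",
              if_seq_no=362,       # from a prior fetch of the document (illustrative)
              if_primary_term=2,   # from the same fetch (illustrative)
              document={"user": {"id": "elkbee"}},
          )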

Routing

By default, shard placement — or routing — is controlled by using a hash of the document's ID value. For more explicit control, the value fed into the hash function used by the router can be directly specified on a per-operation basis using the routing parameter.

When setting up explicit mapping, you can also use the _routing field to direct the index operation to extract the routing value from the document itself. This does come at the (very minimal) cost of an additional document parsing pass. If the _routing mapping is defined and set to be required, the index operation will fail if no routing value is provided or extracted.

NOTE: Data streams do not support custom routing unless they were created with the allow_custom_routing setting enabled in the template.

Distributed

The index operation is directed to the primary shard based on its route and performed on the actual node containing this shard. After the primary shard completes the operation, if needed, the update is distributed to applicable replicas.

Active shards

To improve the resiliency of writes to the system, indexing operations can be configured to wait for a certain number of active shard copies before proceeding with the operation. If the requisite number of active shard copies are not available, then the write operation must wait and retry, until either the requisite shard copies have started or a timeout occurs. By default, write operations only wait for the primary shards to be active before proceeding (that is to say wait_for_active_shards is 1). This default can be overridden in the index settings dynamically by setting index.write.wait_for_active_shards. To alter this behavior per operation, use the wait_for_active_shards request parameter.

Valid values are all or any positive integer up to the total number of configured copies per shard in the index (which is number_of_replicas+1). Specifying a negative value or a number greater than the number of shard copies will throw an error.

For example, suppose you have a cluster of three nodes, A, B, and C, and you create an index named index with the number of replicas set to 3 (resulting in 4 shard copies, one more copy than there are nodes). If you attempt an indexing operation, by default the operation will only ensure the primary copy of each shard is available before proceeding. This means that even if B and C went down and A hosted the primary shard copies, the indexing operation would still proceed with only one copy of the data. If wait_for_active_shards is set on the request to 3 (and all three nodes are up), the indexing operation will require 3 active shard copies before proceeding. This requirement should be met because there are 3 active nodes in the cluster, each one holding a copy of the shard. However, if you set wait_for_active_shards to all (or to 4, which is the same in this situation), the indexing operation will not proceed as you do not have all 4 copies of each shard active in the index. The operation will time out unless a new node is brought up in the cluster to host the fourth copy of the shard.

It is important to note that this setting greatly reduces the chances of the write operation not writing to the requisite number of shard copies, but it does not completely eliminate the possibility, because this check occurs before the write operation starts. After the write operation is underway, it is still possible for replication to fail on any number of shard copies but still succeed on the primary. The _shards section of the API response reveals the number of shard copies on which replication succeeded and failed.

No operation (noop) updates

When updating a document by using this API, a new version of the document is always created even if the document hasn't changed. If this isn't acceptable, use the _update API with detect_noop set to true. The detect_noop option isn't available on this API because it doesn't fetch the old source and isn't able to compare it against the new source.

There isn't a definitive rule for when noop updates aren't acceptable. It's a combination of lots of factors like how frequently your data source sends updates that are actually noops and how many queries per second Elasticsearch runs on the shard receiving the updates.

Versioning

Each indexed document is given a version number. By default, internal versioning is used that starts at 1 and increments with each update, deletes included. Optionally, the version number can be set to an external value (for example, if maintained in a database). To enable this functionality, version_type should be set to external. The value provided must be a numeric, long value greater than or equal to 0, and less than around 9.2e+18.

NOTE: Versioning is completely real time, and is not affected by the near real time aspects of search operations. If no version is provided, the operation runs without any version checks.

When using the external version type, the system checks to see if the version number passed to the index request is greater than the version of the currently stored document. If true, the document will be indexed and the new version number used. If the value provided is less than or equal to the stored document's version number, a version conflict will occur and the index operation will fail. For example:

PUT my-index-000001/_doc/1?version=2&version_type=external
          {
            "user": {
              "id": "elkbee"
            }
          }

          In this example, the operation will succeed since the supplied version of 2 is higher than the current document version of 1.
          If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 HTTP status code).

          A nice side effect is that there is no need to maintain strict ordering of async indexing operations run as a result of changes to a source database, as long as version numbers from the source database are used.
          Even the simple case of updating the Elasticsearch index using data from a database is simplified if external versioning is used, as only the latest version will be used if the index operations arrive out of order.
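
          The same external-versioning request through this client, as a sketch:

          # Equivalent of PUT my-index-000001/_doc/1?version=2&version_type=external
          resp = client.index(
              index="my-index-000001",
              id="1",
              version=2,
              version_type="external",
              document={"user": {"id": "elkbee"}},
          )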
          
``_ :param index: The name of the data stream or index to target. If the target doesn't exist and matches the name or wildcard (`*`) pattern of an index template with a `data_stream` definition, this request creates the data stream. If the target doesn't exist and doesn't match a data stream template, this request creates the index. You can check for existing targets with the resolve index API. :param document: :param id: A unique identifier for the document. To automatically generate a document ID, use the `POST //_doc/` request format and omit this parameter. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param include_source_on_error: If `true`, the document source is included in the error message in case of parsing errors. :param op_type: Set to `create` to only index the document if it does not already exist (put if absent). If a document with the specified `_id` already exists, the indexing operation will fail. The behavior is the same as using the `/_create` endpoint. If a document ID is specified, this parameter defaults to `index`. Otherwise, it defaults to `create`. If the request targets a data stream, an `op_type` of `create` is required. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured, it will always run, regardless of the value of this parameter. :param refresh: If `true`, Elasticsearch refreshes the affected shards to make this operation visible to search. If `wait_for`, it waits for a refresh to make this operation visible to search. If `false`, it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. :param require_data_stream: If `true`, the request's actions must target a data stream (existing or to be created). :param routing: A custom value that is used to route operations to a specific shard. :param timeout: The period the request waits for the following operations: automatic index creation, dynamic mapping updates, waiting for active shards. This parameter is useful for situations where the primary shard assigned to perform the operation might not be available when the operation runs. Some reasons for this might be that the primary shard is currently recovering from a gateway or undergoing relocation. By default, the operation will wait on the primary shard to become available for at least 1 minute before failing and responding with an error. The actual wait time could be longer, particularly when multiple waits occur. :param version: An explicit version number for concurrency control. It must be a non-negative long number. :param version_type: The version type. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. You can set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value of `1` means it waits for each primary shard to be active. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if document is None and body is None: raise ValueError( "Empty value passed for parameters 'document' and 'body', one of them should be set."
) elif document is not None and body is not None: raise ValueError("Cannot set both 'document' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and id not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_doc/{__path_parts["id"]}' __method = "PUT" elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_doc' __method = "POST" else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error if op_type is not None: __query["op_type"] = op_type if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias if require_data_stream is not None: __query["require_data_stream"] = require_data_stream if routing is not None: __query["routing"] = routing if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __body = document if document is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] __method, __path, params=__query, headers=__headers, body=__body, endpoint_id="index", path_parts=__path_parts, ) @_rewrite_parameters() def info( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster info. Get basic build, version, and cluster information. NOTE: In Serverless, this API is retained for backward compatibility only. Some response fields, such as the version number, should be ignored.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "ids"), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def mget( self, *, index: t.Optional[str] = None, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force_synthetic_source: t.Optional[bool] = None, human: t.Optional[bool] = None, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, refresh: t.Optional[bool] = None, routing: t.Optional[str] = None, source: t.Optional[t.Union[bool, t.Union[str, t.Sequence[str]]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get multiple documents.

Get multiple JSON documents by ID from one or more indices. If you specify an index in the request URI, you only need to specify the document IDs in the request body. To ensure fast responses, this multi get (mget) API responds with partial results if one or more shards fail.

Filter source fields

By default, the _source field is returned for every document (if stored). Use the _source and _source_includes or _source_excludes attributes to filter which fields are returned for a particular document. You can include the _source, _source_includes, and _source_excludes query parameters in the request URI to specify the defaults to use when there are no per-document instructions.

Get stored fields

Use the stored_fields attribute to specify the set of stored fields you want to retrieve. Any requested fields that are not stored are ignored. You can include the stored_fields query parameter in the request URI to specify the defaults to use when there are no per-document instructions.
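
A minimal sketch of a multi get through this client (index name and IDs are illustrative, with `client` an `Elasticsearch` instance):

          resp = client.mget(index="my-index-000001", ids=["1", "2"])
          docs = resp["docs"]  # one result entry per requested ID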

``_ :param index: Name of the index to retrieve documents from when `ids` are specified, or when a document in the `docs` array does not specify an index. :param docs: The documents you want to retrieve. Required if no index is specified in the request URI. :param force_synthetic_source: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower the enabling synthetic source natively in the index. :param ids: The IDs of the documents you want to retrieve. Allowed when the index is specified in the request URI. :param preference: Specifies the node or shard the operation should be performed on. Random by default. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param refresh: If `true`, the request refreshes relevant shards before retrieving documents. :param routing: Custom value used to route operations to a specific shard. :param source: True or false to return the `_source` field or not, or a list of fields to return. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stored_fields: If `true`, retrieves the document fields stored in the index rather than the document `_source`. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mget' else: __path_parts = {} __path = "/_mget" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force_synthetic_source is not None: __query["force_synthetic_source"] = force_synthetic_source if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if refresh is not None: __query["refresh"] = refresh if routing is not None: __query["routing"] = routing if source is not None: __query["_source"] = source if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if stored_fields is not None: __query["stored_fields"] = stored_fields if not __body: if docs is not None: __body["docs"] = docs if ids is not None: __body["ids"] = ids __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="mget", path_parts=__path_parts, ) @_rewrite_parameters( body_name="searches", ) def msearch( self, *, searches: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, 
error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_named_queries_score: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run multiple searches.

The format of the request is similar to the bulk API format and makes use of the newline delimited JSON (NDJSON) format. The structure is as follows:

header\\n
          body\\n
          header\\n
          body\\n
          

This structure is specifically optimized to reduce parsing if a specific search ends up redirected to another node.

IMPORTANT: The final line of data must end with a newline character \\n. Each newline character may be preceded by a carriage return \\r. When sending requests to this endpoint the Content-Type header should be set to application/x-ndjson.
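
Expressed through this client, the alternating header/body pairs become a list of mappings; a sketch (index names and queries are illustrative):

          resp = client.msearch(
              index="my-index-000001",
              searches=[
                  {},  # header: search the index given in the URL
                  {"query": {"match_all": {}}},
                  {"index": "my-other-index"},  # header: override the target
                  {"query": {"match": {"message": "hello"}}},
              ],
          )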

``_ :param searches: :param index: Comma-separated list of data streams, indices, and index aliases to search. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param include_named_queries_score: Indicates whether hit.matched_queries should be rendered as a map that includes the name of the matched query associated with its score (true) or as an array containing the name of the matched queries (false) This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. Defaults to `max(1, (# of data nodes * min(search thread pool size, 10)))`. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. :param rest_total_hits_as_int: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. :param routing: Custom routing value used to route search operations to a specific shard. :param search_type: Indicates whether global term and document frequencies should be used when scoring returned documents. :param typed_keys: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. """ if searches is None and body is None: raise ValueError( "Empty value passed for parameters 'searches' and 'body', one of them should be set." 
) elif searches is not None and body is not None: raise ValueError("Cannot set both 'searches' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_msearch' else: __path_parts = {} __path = "/_msearch" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_named_queries_score is not None: __query["include_named_queries_score"] = include_named_queries_score if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys __body = searches if searches is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="msearch", path_parts=__path_parts, ) @_rewrite_parameters( body_name="search_templates", ) def msearch_template( self, *, search_templates: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, typed_keys: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run multiple templated searches.

Run multiple templated searches with a single request. If you are providing a text file or text input to curl, use the --data-binary flag instead of -d to preserve newlines. For example:

$ cat requests
          { "index": "my-index" }
          { "id": "my-search-template", "params": { "query_string": "hello world", "from": 0, "size": 10 }}
          { "index": "my-other-index" }
          { "id": "my-other-search-template", "params": { "query_type": "match_all" }}

          $ curl -H "Content-Type: application/x-ndjson" -XGET localhost:9200/_msearch/template --data-binary "@requests"; echo
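
          The same batch expressed through this client, as a sketch:

          resp = client.msearch_template(
              search_templates=[
                  {"index": "my-index"},
                  {
                      "id": "my-search-template",
                      "params": {"query_string": "hello world", "from": 0, "size": 10},
                  },
                  {"index": "my-other-index"},
                  {"id": "my-other-search-template", "params": {"query_type": "match_all"}},
              ],
          )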
          
``_ :param search_templates: :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. :param max_concurrent_searches: The maximum number of concurrent searches the API can run. :param rest_total_hits_as_int: If `true`, the response returns `hits.total` as an integer. If `false`, it returns `hits.total` as an object. :param search_type: The type of the search operation. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. """ if search_templates is None and body is None: raise ValueError( "Empty value passed for parameters 'search_templates' and 'body', one of them should be set." ) elif search_templates is not None and body is not None: raise ValueError("Cannot set both 'search_templates' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_msearch/template' else: __path_parts = {} __path = "/_msearch/template" __query: t.Dict[str, t.Any] = {} if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys __body = search_templates if search_templates is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="msearch_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "ids"), ) def mtermvectors( self, *, index: t.Optional[str] = None, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, field_statistics: t.Optional[bool] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ids: t.Optional[t.Sequence[str]] = None, offsets: t.Optional[bool] = None, payloads: t.Optional[bool] = None, positions: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, routing: t.Optional[str] = None, term_statistics: t.Optional[bool] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get multiple term vectors.

Get multiple term vectors with a single request. You can specify existing documents by index and ID or provide artificial documents in the body of the request. You can specify the index in the request body or request URI. The response contains a docs array with all the fetched termvectors. Each element has the structure provided by the termvectors API.

Artificial documents

You can also use mtermvectors to generate term vectors for artificial documents provided in the body of the request. The mapping used is determined by the specified _index.
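
A minimal sketch using the simplified ids syntax (values are illustrative, with `client` an `Elasticsearch` instance):

          # Term vectors for two documents in the same index
          resp = client.mtermvectors(
              index="my-index-000001",
              ids=["1", "2"],
              term_statistics=True,
          )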

``_ :param index: The name of the index that contains the documents. :param docs: An array of existing or artificial documents. :param field_statistics: If `true`, the response includes the document count, sum of document frequencies, and sum of total term frequencies. :param fields: A comma-separated list or wildcard expressions of fields to include in the statistics. It is used as the default list unless a specific field list is provided in the `completion_fields` or `fielddata_fields` parameters. :param ids: A simplified syntax to specify documents by their ID if they're in the same index. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. :param positions: If `true`, the response includes term positions. :param preference: The node or shard the operation should be performed on. It is random by default. :param realtime: If true, the request is real-time as opposed to near-real-time. :param routing: A custom value used to route operations to a specific shard. :param term_statistics: If true, the response includes term frequency and document frequency. :param version: If `true`, returns the document version as part of a hit. :param version_type: The version type. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mtermvectors' else: __path_parts = {} __path = "/_mtermvectors" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if field_statistics is not None: __query["field_statistics"] = field_statistics if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if offsets is not None: __query["offsets"] = offsets if payloads is not None: __query["payloads"] = payloads if positions is not None: __query["positions"] = positions if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if routing is not None: __query["routing"] = routing if term_statistics is not None: __query["term_statistics"] = term_statistics if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if not __body: if docs is not None: __body["docs"] = docs if ids is not None: __body["ids"] = ids if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="mtermvectors", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("index_filter",), ) def open_point_in_time( self, *, index: t.Union[str, t.Sequence[str]], keep_alive: t.Union[str, t.Literal[-1], t.Literal[0]], allow_partial_search_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, 
t.Any]] = None, max_concurrent_shard_requests: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Open a point in time.

A search request by default runs against the most recent visible data of the target indices, which is called point in time. Elasticsearch pit (point in time) is a lightweight view into the state of the data as it existed when initiated. In some cases, it’s preferred to perform multiple search requests using the same point in time. For example, if refreshes happen between search_after requests, then the results of those requests might not be consistent as changes happening between searches are only visible to the more recent point in time.

A point in time must be opened explicitly before being used in search requests.

A subsequent search request with the pit parameter must not specify index, routing, or preference values as these parameters are copied from the point in time.

Just like regular searches, you can use from and size to page through point in time search results, up to the first 10,000 hits. If you want to retrieve more hits, use PIT with search_after.

IMPORTANT: The open point in time request and each subsequent search request can return different identifiers; always use the most recently received ID for the next search request.

When a PIT that contains shard failures is used in a search request, the missing shards are always reported in the search response as a NoShardAvailableActionException exception. To get rid of these exceptions, a new PIT needs to be created so that shards missing from the previous PIT can be handled, assuming they become available in the meantime.

Keeping point in time alive

The keep_alive parameter, which is passed to an open point in time request and search request, extends the time to live of the corresponding point in time. The value does not need to be long enough to process all data — it just needs to be long enough for the next request.

Normally, the background merge process optimizes the index by merging together smaller segments to create new, bigger segments. Once the smaller segments are no longer needed, they are deleted. However, open point-in-times prevent the old segments from being deleted since they are still in use.

TIP: Keeping older segments alive means that more disk space and file handles are needed. Ensure that you have configured your nodes to have ample free file handles.

Additionally, if a segment contains deleted or updated documents then the point in time must keep track of whether each document in the segment was live at the time of the initial search request. Ensure that your nodes have sufficient heap space if you have many open point-in-times on an index that is subject to ongoing deletes or updates. Note that a point-in-time doesn't prevent its associated indices from being deleted. You can check how many point-in-times (that is, search contexts) are open with the nodes stats API.
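
A sketch of opening a point in time and keeping its identifier for follow-up searches (the index name and keep-alive value are illustrative):

          pit = client.open_point_in_time(index="my-index-000001", keep_alive="1m")
          pit_id = pit["id"]  # reference this ID in subsequent search requests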

``_ :param index: A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices :param keep_alive: Extend the length of time that the point in time persists. :param allow_partial_search_results: Indicates whether the point in time tolerates unavailable shards or shard failures when initially creating the PIT. If `false`, creating a point in time request when a shard is missing or unavailable will throw an exception. If `true`, the point in time will contain all the shards that are available at the time of the request. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param index_filter: Filter indices if the provided query rewrites to `match_none` on every shard. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param preference: The node or shard the operation should be performed on. By default, it is random. :param routing: A custom value that is used to route operations to a specific shard. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if keep_alive is None and body is None: raise ValueError("Empty value passed for parameter 'keep_alive'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_pit' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if keep_alive is not None: __query["keep_alive"] = keep_alive if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if routing is not None: __query["routing"] = routing if not __body: if index_filter is not None: __body["index_filter"] = index_filter if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="open_point_in_time", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("script",), ) def put_script( self, *, id: str, script: t.Optional[t.Mapping[str, t.Any]] = None, context: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> 
ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a script or search template. This creates or updates a stored script or search template that can later be referenced by its identifier.
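
For instance, a minimal sketch that stores a Painless script; the ID `my-calc` and the field name are illustrative, not part of this API's documentation::

    client.put_script(
        id="my-calc",
        script={
            "lang": "painless",
            "source": "doc['my_field'].value * params.factor",
        },
    )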

``_ :param id: The identifier for the stored script or search template. It must be unique within the cluster. :param script: The script or search template, its parameters, and its language. :param context: The context in which the script or search template should run. To prevent errors, the API immediately compiles the script or template in this context. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if script is None and body is None: raise ValueError("Empty value passed for parameter 'script'") __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH and context not in SKIP_IN_PATH: __path_parts = {"id": _quote(id), "context": _quote(context)} __path = f'/_scripts/{__path_parts["id"]}/{__path_parts["context"]}' elif id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_scripts/{__path_parts["id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if script is not None: __body["script"] = script __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="put_script", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("requests", "metric"), ) def rank_eval( self, *, requests: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, metric: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, search_type: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Evaluate ranked search results.

Evaluate the quality of ranked search results over a set of typical search queries.
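
A minimal sketch, assuming a client named `client`; the index, query, and rated document are hypothetical::

    resp = client.rank_eval(
        index="my-index",
        requests=[
            {
                "id": "query_1",
                "request": {"query": {"match": {"title": "elasticsearch"}}},
                "ratings": [{"_index": "my-index", "_id": "1", "rating": 1}],
            }
        ],
        metric={"precision": {"k": 10}},
    )
    print(resp["metric_score"])  # Overall score across the rated queries.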

``_ :param requests: A set of typical search requests, together with their provided ratings. :param index: A comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard (`*`) expressions are supported. To target all data streams and indices in a cluster, omit this parameter or use `_all` or `*`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param metric: Definition of the evaluation metric to calculate. :param search_type: Search operation type """ if requests is None and body is None: raise ValueError("Empty value passed for parameter 'requests'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_rank_eval' else: __path_parts = {} __path = "/_rank_eval" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if search_type is not None: __query["search_type"] = search_type if not __body: if requests is not None: __body["requests"] = requests if metric is not None: __body["metric"] = metric __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="rank_eval", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("dest", "source", "conflicts", "max_docs", "script", "size"), ) def reindex( self, *, dest: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_docs: t.Optional[int] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, require_alias: t.Optional[bool] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, size: t.Optional[int] = None, slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reindex documents.

Copy documents from a source to a destination. You can copy all documents to the destination index or reindex a subset of the documents. The source can be any existing index, alias, or data stream. The destination must differ from the source. For example, you cannot reindex a data stream into itself.

IMPORTANT: Reindex requires _source to be enabled for all documents in the source. The destination should be configured as wanted before calling the reindex API. Reindex does not copy the settings from the source or its associated template. Mappings, shard counts, and replicas, for example, must be configured ahead of time.

If the Elasticsearch security features are enabled, you must have the following security privileges:

  • The read index privilege for the source data stream, index, or alias.
  • The write index privilege for the destination data stream, index, or index alias.
  • To automatically create a data stream or index with a reindex API request, you must have the auto_configure, create_index, or manage index privilege for the destination data stream, index, or alias.
  • If reindexing from a remote cluster, the source.remote.user must have the monitor cluster privilege and the read index privilege for the source data stream, index, or alias.

If reindexing from a remote cluster, you must explicitly allow the remote host in the reindex.remote.whitelist setting. Automatic data stream creation requires a matching index template with data stream enabled.

The dest element can be configured like the index API to control optimistic concurrency control. Omitting version_type or setting it to internal causes Elasticsearch to blindly dump documents into the destination, overwriting any that happen to have the same ID.

Setting version_type to external causes Elasticsearch to preserve the version from the source, create any documents that are missing, and update any documents that have an older version in the destination than they do in the source.

Setting op_type to create causes the reindex API to create only missing documents in the destination. All existing documents will cause a version conflict.

IMPORTANT: Because data streams are append-only, any reindex request to a destination data stream must have an op_type of create. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream.

By default, version conflicts abort the reindex process. To continue reindexing if there are conflicts, set the conflicts request body property to proceed. In this case, the response includes a count of the version conflicts that were encountered. Note that the handling of other error types is unaffected by the conflicts property. Additionally, if you opt to count version conflicts, the operation could attempt to reindex more documents from the source than max_docs until it has successfully indexed max_docs documents into the target or it has gone through every document in the source query.

It's recommended to reindex on indices with a green status. Reindexing can fail when a node shuts down or crashes.

  • When requested with wait_for_completion=true (default), the request fails if the node shuts down.
  • When requested with wait_for_completion=false, a task id is returned, for use with the task management APIs. The task may disappear or fail if the node shuts down. When retrying a failed reindex operation, it might be necessary to set conflicts=proceed or to first delete the partial destination index. Additionally, dry runs, checking disk space, and fetching index recovery information can help address the root cause.

Refer to the linked documentation for examples of how to reindex documents.
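
As one illustrative sketch (index names are hypothetical), copying documents while tolerating version conflicts and running asynchronously::

    resp = client.reindex(
        source={"index": "old-index"},
        dest={"index": "new-index", "op_type": "create"},
        conflicts="proceed",
        wait_for_completion=False,
    )
    # With wait_for_completion=False the response carries a task ID
    # that can be checked or rethrottled through the tasks APIs.
    task_id = resp["task"]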

``_ :param dest: The destination you are copying to. :param source: The source you are copying from. :param conflicts: Indicates whether to continue reindexing even when there are conflicts. :param max_docs: The maximum number of documents to reindex. By default, all documents are reindexed. If it is a value less than or equal to `scroll_size`, a scroll will not be used to retrieve the results for the operation. If `conflicts` is set to `proceed`, the reindex operation could attempt to reindex more documents from the source than `max_docs` until it has successfully indexed `max_docs` documents into the target or it has gone through every document in the source query. :param refresh: If `true`, the request refreshes affected shards to make this operation visible to search. :param requests_per_second: The throttle for this request in sub-requests per second. By default, there is no throttle. :param require_alias: If `true`, the destination must be an index alias. :param script: The script to run to update the document source or metadata when reindexing. :param scroll: The period of time that a consistent view of the index should be maintained for scrolled search. :param size: :param slices: The number of slices this task should be divided into. It defaults to one slice, which means the task isn't sliced into subtasks. Reindex supports sliced scroll to parallelize the reindexing process. This parallelization can improve efficiency and provide a convenient way to break the request down into smaller parts. NOTE: Reindexing from remote clusters does not support manual or automatic slicing. If set to `auto`, Elasticsearch chooses the number of slices to use. This setting will use one slice per shard, up to a certain limit. If there are multiple sources, it will choose the number of slices based on the index or backing index with the smallest number of shards. :param timeout: The period each indexing operation waits for automatic index creation, dynamic mapping updates, and active shards. By default, Elasticsearch waits for at least one minute before failing. The actual wait time could be longer, particularly when multiple waits occur. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set it to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The default value is one, which means it waits for each primary shard to be active. :param wait_for_completion: If `true`, the request blocks until the operation is complete. 
""" if dest is None and body is None: raise ValueError("Empty value passed for parameter 'dest'") if source is None and body is None: raise ValueError("Empty value passed for parameter 'source'") __path_parts: t.Dict[str, str] = {} __path = "/_reindex" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if requests_per_second is not None: __query["requests_per_second"] = requests_per_second if require_alias is not None: __query["require_alias"] = require_alias if scroll is not None: __query["scroll"] = scroll if slices is not None: __query["slices"] = slices if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if dest is not None: __body["dest"] = dest if source is not None: __body["source"] = source if conflicts is not None: __body["conflicts"] = conflicts if max_docs is not None: __body["max_docs"] = max_docs if script is not None: __body["script"] = script if size is not None: __body["size"] = size __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="reindex", path_parts=__path_parts, ) @_rewrite_parameters() def reindex_rethrottle( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Throttle a reindex operation.

Change the number of requests per second for a particular reindex operation. For example:

POST _reindex/r1A2WoRbTwKZ516z6NEs5A:36619/_rethrottle?requests_per_second=-1
          

Rethrottling that speeds up the query takes effect immediately. Rethrottling that slows down the query will take effect after completing the current batch. This behavior prevents scroll timeouts.
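
The equivalent client call might look like the following sketch; the task ID is the placeholder from the example above::

    # Turn off throttling (-1) for the running reindex task.
    client.reindex_rethrottle(
        task_id="r1A2WoRbTwKZ516z6NEs5A:36619",
        requests_per_second=-1,
    )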

``_ :param task_id: The task identifier, which can be found by using the tasks API. :param requests_per_second: The throttle for this request in sub-requests per second. It can be either `-1` to turn off throttling or any decimal number like `1.7` or `12` to throttle to that level. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_reindex/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if requests_per_second is not None: __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="reindex_rethrottle", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("file", "params", "source"), ignore_deprecated_options={"params"}, ) def render_search_template( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, file: t.Optional[str] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Render a search template.

Render a search template as a search request body.
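
For example, a minimal sketch that renders an inline template without storing it first (field and variable names are hypothetical)::

    resp = client.render_search_template(
        source={"query": {"match": {"message": "{{query_string}}"}}},
        params={"query_string": "hello world"},
    )
    # The rendered search request body is returned under "template_output".
    print(resp["template_output"])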

``_ :param id: The ID of the search template to render. If no `source` is specified, this or the `id` request body parameter is required. :param file: :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. :param source: An inline search template. It supports the same parameters as the search API's request body. These parameters also support Mustache variables. If no `id` or `` is specified, this parameter is required. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_render/template/{__path_parts["id"]}' else: __path_parts = {} __path = "/_render/template" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if file is not None: __body["file"] = file if params is not None: __body["params"] = params if source is not None: __body["source"] = source if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="render_search_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("context", "context_setup", "script"), ) @_stability_warning(Stability.EXPERIMENTAL) def scripts_painless_execute( self, *, context: t.Optional[ t.Union[ str, t.Literal[ "boolean_field", "composite_field", "date_field", "double_field", "filter", "geo_point_field", "ip_field", "keyword_field", "long_field", "painless_test", "score", ], ] ] = None, context_setup: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a script.

Runs a script and returns a result. Use this API to build and test scripts, such as when defining a script for a runtime field. This API requires very few dependencies and is especially useful if you don't have permissions to write documents on a cluster.

The API uses several contexts, which control how scripts are run, what variables are available at runtime, and what the return type is.

Each context requires a script, but additional parameters depend on the context you're using for that script.
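
A minimal sketch using the default `painless_test` context, which needs no index or documents::

    resp = client.scripts_painless_execute(
        script={
            "source": "params.count / params.total",
            "params": {"count": 100.0, "total": 1000.0},
        }
    )
    print(resp["result"])  # "0.1"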

``_ :param context: The context that the script should run in. NOTE: Result ordering in the field contexts is not guaranteed. :param context_setup: Additional parameters for the `context`. NOTE: This parameter is required for all contexts except `painless_test`, which is the default if no value is provided for `context`. :param script: The Painless script to run. """ __path_parts: t.Dict[str, str] = {} __path = "/_scripts/painless/_execute" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if context is not None: __body["context"] = context if context_setup is not None: __body["context_setup"] = context_setup if script is not None: __body["script"] = script if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="scripts_painless_execute", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("scroll_id", "scroll"), ) def scroll( self, *, scroll_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a scrolling search.

IMPORTANT: The scroll API is no longer recommended for deep pagination. If you need to preserve the index state while paging through more than 10,000 hits, use the search_after parameter with a point in time (PIT).

The scroll API gets large sets of results from a single scrolling search request. To get the necessary scroll ID, submit a search API request that includes an argument for the scroll query parameter. The scroll parameter indicates how long Elasticsearch should retain the search context for the request. The search response returns a scroll ID in the _scroll_id response body parameter. You can then use the scroll ID with the scroll API to retrieve the next batch of results for the request. If the Elasticsearch security features are enabled, the access to the results of a specific scroll ID is restricted to the user or API key that submitted the search.

You can also use the scroll API to specify a new scroll parameter that extends or shortens the retention period for the search context.

IMPORTANT: Results from a scrolling search reflect the state of the index at the time of the initial search request. Subsequent indexing or document changes only affect later search and scroll requests.
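
A typical scrolling loop might look like this sketch (index name and page size are hypothetical)::

    resp = client.search(
        index="my-index", scroll="2m", size=1000, query={"match_all": {}}
    )
    scroll_id = resp["_scroll_id"]
    while resp["hits"]["hits"]:
        # ... process resp["hits"]["hits"] ...
        resp = client.scroll(scroll_id=scroll_id, scroll="2m")
        scroll_id = resp["_scroll_id"]
    # Free the search context once the scroll is exhausted.
    client.clear_scroll(scroll_id=scroll_id)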

``_ :param scroll_id: The scroll ID of the search. :param rest_total_hits_as_int: If true, the API response’s hit.total property is returned as an integer. If false, the API response’s hit.total property is returned as an object. :param scroll: The period to retain the search context for scrolling. """ if scroll_id is None and body is None: raise ValueError("Empty value passed for parameter 'scroll_id'") __path_parts: t.Dict[str, str] = {} __path = "/_search/scroll" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if not __body: if scroll_id is not None: __body["scroll_id"] = scroll_id if scroll is not None: __body["scroll"] = scroll if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="scroll", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "collapse", "docvalue_fields", "explain", "ext", "fields", "from_", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rank", "rescore", "retriever", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", "from": "from_", }, ) def search( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, batched_reduce_size: t.Optional[int] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, collapse: t.Optional[t.Mapping[str, t.Any]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, ext: t.Optional[t.Mapping[str, t.Any]] = None, fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force_synthetic_source: t.Optional[bool] = None, from_: t.Optional[int] = None, highlight: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_named_queries_score: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], 
t.Sequence[t.Mapping[str, t.Any]]] ] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, rank: t.Optional[t.Mapping[str, t.Any]] = None, request_cache: t.Optional[bool] = None, rescore: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, retriever: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, seq_no_primary_term: t.Optional[bool] = None, size: t.Optional[int] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stats: t.Optional[t.Sequence[str]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, suggest: t.Optional[t.Mapping[str, t.Any]] = None, suggest_field: t.Optional[str] = None, suggest_mode: t.Optional[ t.Union[str, t.Literal["always", "missing", "popular"]] ] = None, suggest_size: t.Optional[int] = None, suggest_text: t.Optional[str] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[str] = None, track_scores: t.Optional[bool] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, typed_keys: t.Optional[bool] = None, version: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a search.

Get search hits that match the query defined in the request. You can provide search queries using the q query string parameter or the request body. If both are specified, only the q query string parameter is used.
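
A minimal sketch of both styles, assuming a client named `client` (index and field names are hypothetical)::

    # Request body search using the Query DSL.
    resp = client.search(index="my-index", query={"match": {"title": "elasticsearch"}})

    # Equivalent Lucene query string search; if both styles are given, `q` wins.
    resp = client.search(index="my-index", q="title:elasticsearch")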

If the Elasticsearch security features are enabled, you must have the read index privilege for the target data stream, index, or alias. For cross-cluster search, refer to the documentation about configuring CCS privileges. To search a point in time (PIT) for an alias, you must have the read index privilege for the alias's data streams or indices.

Search slicing

When paging through a large number of documents, it can be helpful to split the search into multiple slices to consume them independently with the slice and pit properties. By default the splitting is done first on the shards, then locally on each shard. The local splitting partitions the shard into contiguous ranges based on Lucene document IDs.

For instance if the number of shards is equal to 2 and you request 4 slices, the slices 0 and 2 are assigned to the first shard and the slices 1 and 3 are assigned to the second shard.

IMPORTANT: The same point-in-time ID should be used for all slices. If different PIT IDs are used, slices can overlap and miss documents. This situation can occur because the splitting criterion is based on Lucene document IDs, which are not stable across changes to the index.
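
A sketch of two slices consumed against the same PIT (the index name and slice count are hypothetical)::

    pit = client.open_point_in_time(index="my-index", keep_alive="1m")
    for slice_id in range(2):
        # Each slice returns a disjoint subset of the matching documents.
        resp = client.search(
            slice={"id": slice_id, "max": 2},
            pit={"id": pit["id"], "keep_alive": "1m"},
            query={"match_all": {}},
        )
    client.close_point_in_time(id=pit["id"])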

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. :param aggregations: Defines the aggregations that are run as part of the search request. :param aggs: Defines the aggregations that are run as part of the search request. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param allow_partial_search_results: If `true` and there are shard request timeouts or shard failures, the request returns partial results. If `false`, it returns an error with no partial results. To override the default behavior, you can set the `search.default_allow_partial_results` cluster setting to `false`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param batched_reduce_size: The number of shard results that should be reduced at once on the coordinating node. If the potential number of shards in the request can be large, this value should be used as a protection mechanism to reduce the memory overhead per search request. :param ccs_minimize_roundtrips: If `true`, network round-trips between the coordinating node and the remote clusters are minimized when running cross-cluster search (CCS) requests. :param collapse: Collapses search results by the values of the specified field. :param default_operator: The default operator for the query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as a default when no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param docvalue_fields: An array of wildcard (`*`) field patterns. The request returns doc values for field names matching these patterns in the `hits.fields` property of the response. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values such as `open,hidden`. :param explain: If `true`, the request returns detailed information about score computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. :param fields: An array of wildcard (`*`) field patterns. The request returns values for field names matching these patterns in the `hits.fields` property of the response. :param force_synthetic_source: Should this request force synthetic _source? Use this to test if the mapping supports synthetic _source and to get a sense of the worst case performance. Fetches with this enabled will be slower than enabling synthetic source natively in the index. :param from_: The starting document offset, which must be non-negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. 
:param highlight: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in your search results. :param ignore_throttled: If `true`, concrete, expanded or aliased indices will be ignored when frozen. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_named_queries_score: If `true`, the response includes the score contribution from any named queries. This functionality reruns each named query on every hit in a search response. Typically, this adds a small overhead to a request. However, using computationally expensive named queries on a large number of hits may add significant overhead. :param indices_boost: Boost the `_score` of documents from specified indices. The boost value is the factor by which scores are multiplied. A boost value greater than `1.0` increases the score. A boost value between `0` and `1.0` decreases the score. :param knn: The approximate kNN search to run. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param max_concurrent_shard_requests: The number of concurrent shard requests per node that the search runs concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests. :param min_score: The minimum `_score` for matching documents. Documents with a lower `_score` are not included in search results and results collected by aggregations. :param pit: Limit the search to a point in time (PIT). If you provide a PIT, you cannot specify an `` in the request path. :param post_filter: Use the `post_filter` parameter to filter search results. The search hits are filtered after the aggregations are calculated. A post filter has no impact on the aggregation results. :param pre_filter_shard_size: A threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method (if date filters are mandatory to match but the shard bounds and the query are disjoint). When unspecified, the pre-filter phase is executed if any of these conditions is met: * The request targets more than 128 shards. * The request targets one or more read-only index. * The primary sort of the query targets an indexed field. :param preference: The nodes and shards used for the search. By default, Elasticsearch selects from eligible nodes and shards using adaptive replica selection, accounting for allocation awareness. Valid values are: * `_only_local` to run the search only on shards on the local node. * `_local` to, if possible, run the search on shards on the local node, or if not, select shards using the default method. * `_only_nodes:,` to run the search on only the specified nodes IDs. If suitable shards exist on more than one selected node, use shards on those nodes using the default method. If none of the specified nodes are available, select shards from any available node using the default method. * `_prefer_nodes:,` to if possible, run the search on the specified nodes IDs. If not, select shards using the default method. * `_shards:,` to run the search only on the specified shards. 
You can combine this value with other `preference` values. However, the `_shards` value must come first. For example: `_shards:2,3|_local`. * `` (any string that does not start with `_`) to route searches with the same `` to the same shards in the same order. :param profile: Set to `true` to return detailed timing information about the execution of individual components in a search request. NOTE: This is a debugging tool and adds significant overhead to search execution. :param q: A query in the Lucene query string syntax. Query parameter searches do not support the full Elasticsearch Query DSL but are handy for testing. IMPORTANT: This parameter overrides the query parameter in the request body. If both parameters are specified, documents matching the query request body parameter are not returned. :param query: The search definition using the Query DSL. :param rank: The Reciprocal Rank Fusion (RRF) to use. :param request_cache: If `true`, the caching of search results is enabled for requests where `size` is `0`. It defaults to index level settings. :param rescore: Can be used to improve precision by reordering just the top (for example 100 - 500) documents returned by the `query` and `post_filter` phases. :param rest_total_hits_as_int: Indicates whether `hits.total` should be rendered as an integer or an object in the rest search response. :param retriever: A retriever is a specification to describe top documents returned from a search. A retriever replaces other elements of the search API that also return top documents such as `query` and `knn`. :param routing: A custom value that is used to route operations to a specific shard. :param runtime_mappings: One or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. :param scroll: The period to retain the search context for scrolling. By default, this value cannot exceed `1d` (24 hours). You can change this limit by using the `search.max_keep_alive` cluster-level setting. :param search_after: Used to retrieve the next page of hits using a set of sort values from the previous page. :param search_type: Indicates how distributed term frequencies are calculated for relevance scoring. :param seq_no_primary_term: If `true`, the request returns sequence number and primary term of the last modification of each hit. :param size: The number of hits to return, which must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` property. :param slice: Split a scrolled search into multiple slices that can be consumed independently. :param sort: A comma-separated list of : pairs. :param source: The source fields that are returned for matching documents. These fields are returned in the `hits._source` property of the search response. If the `stored_fields` property is specified, the `_source` property defaults to `false`. Otherwise, it defaults to `true`. :param source_excludes: A comma-separated list of source fields to exclude from the response. You can also use this parameter to exclude fields from the subset specified in `_source_includes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param source_includes: A comma-separated list of source fields to include in the response. If this parameter is specified, only these source fields are returned. 
You can exclude fields from this subset using the `_source_excludes` query parameter. If the `_source` parameter is `false`, this parameter is ignored. :param stats: The stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. :param stored_fields: A comma-separated list of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the `_source` property defaults to `false`. You can pass `_source: true` to return both source fields and stored fields in the search response. :param suggest: Defines a suggester that provides similar looking terms based on a provided text. :param suggest_field: The field to use for suggestions. :param suggest_mode: The suggest mode. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param suggest_size: The number of suggestions to return. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param suggest_text: The source text for which the suggestions should be returned. This parameter can be used only when the `suggest_field` and `suggest_text` query string parameters are specified. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this property to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this property for requests that target data streams with backing indices across multiple data tiers. If set to `0` (default), the query does not terminate early. :param timeout: The period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param track_scores: If `true`, calculate and return document scores, even if the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. :param typed_keys: If `true`, aggregation and suggester names are prefixed by their respective types in the response. :param version: If `true`, the request returns the document version as part of a hit. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_search' else: __path_parts = {} __path = "/_search" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. 
if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if batched_reduce_size is not None: __query["batched_reduce_size"] = batched_reduce_size if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if force_synthetic_source is not None: __query["force_synthetic_source"] = force_synthetic_source if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_named_queries_score is not None: __query["include_named_queries_score"] = include_named_queries_score if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if request_cache is not None: __query["request_cache"] = request_cache if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if suggest_field is not None: __query["suggest_field"] = suggest_field if suggest_mode is not None: __query["suggest_mode"] = suggest_mode if suggest_size is not None: __query["suggest_size"] = suggest_size if suggest_text is not None: __query["suggest_text"] = suggest_text if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if collapse is not None: __body["collapse"] = collapse if docvalue_fields is not None: __body["docvalue_fields"] = docvalue_fields if explain is not None: __body["explain"] = explain if ext is not None: __body["ext"] = ext if fields is not None: __body["fields"] = fields if from_ is not None: __body["from"] = from_ if highlight is not None: __body["highlight"] = highlight if indices_boost is not None: __body["indices_boost"] = indices_boost if knn is not None: __body["knn"] = knn if min_score is not None: __body["min_score"] = min_score if pit is not None: __body["pit"] = pit if post_filter is not None: __body["post_filter"] = post_filter if profile is not None: __body["profile"] = 
profile if query is not None: __body["query"] = query if rank is not None: __body["rank"] = rank if rescore is not None: __body["rescore"] = rescore if retriever is not None: __body["retriever"] = retriever if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if search_after is not None: __body["search_after"] = search_after if seq_no_primary_term is not None: __body["seq_no_primary_term"] = seq_no_primary_term if size is not None: __body["size"] = size if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort if source is not None: __body["_source"] = source if stats is not None: __body["stats"] = stats if stored_fields is not None: __body["stored_fields"] = stored_fields if suggest is not None: __body["suggest"] = suggest if terminate_after is not None: __body["terminate_after"] = terminate_after if timeout is not None: __body["timeout"] = timeout if track_scores is not None: __body["track_scores"] = track_scores if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggs", "buffer", "exact_bounds", "extent", "fields", "grid_agg", "grid_precision", "grid_type", "query", "runtime_mappings", "size", "sort", "track_total_hits", "with_labels", ), ) def search_mvt( self, *, index: t.Union[str, t.Sequence[str]], field: str, zoom: int, x: int, y: int, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, buffer: t.Optional[int] = None, error_trace: t.Optional[bool] = None, exact_bounds: t.Optional[bool] = None, extent: t.Optional[int] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, grid_agg: t.Optional[t.Union[str, t.Literal["geohex", "geotile"]]] = None, grid_precision: t.Optional[int] = None, grid_type: t.Optional[ t.Union[str, t.Literal["centroid", "grid", "point"]] ] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, with_labels: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> BinaryApiResponse: """ .. raw:: html

Search a vector tile.

Search a vector tile for geospatial values. Before using this API, you should be familiar with the Mapbox vector tile specification. The API returns results as a binary Mapbox vector tile.

Internally, Elasticsearch translates a vector tile search API request into a search containing:

  • A geo_bounding_box query on the <field>. The query uses the <zoom>/<x>/<y> tile as a bounding box.
  • A geotile_grid or geohex_grid aggregation on the <field>. The grid_agg parameter determines the aggregation type. The aggregation uses the <zoom>/<x>/<y> tile as a bounding box.
  • Optionally, a geo_bounds aggregation on the <field>. The search only includes this aggregation if the exact_bounds parameter is true.
  • If the optional parameter with_labels is true, the internal search will include a dynamic runtime field that calls the getLabelPosition function of the geometry doc value. This enables the generation of new point features containing suggested geometry labels, so that, for example, multi-polygons will have only one label.

The API returns results as a binary Mapbox vector tile. Mapbox vector tiles are encoded as Google Protobufs (PBF). By default, the tile contains three layers:

  • A hits layer containing a feature for each <field> value matching the geo_bounding_box query.
  • An aggs layer containing a feature for each cell of the geotile_grid or geohex_grid. The layer only contains features for cells with matching data.
  • A meta layer containing:
    • A feature containing a bounding box. By default, this is the bounding box of the tile.
    • Value ranges for any sub-aggregations on the geotile_grid or geohex_grid.
    • Metadata for the search.

The API only returns features that can display at its zoom level. For example, if a polygon feature has no area at its zoom level, the API omits it. The API returns errors as UTF-8 encoded JSON.

IMPORTANT: You can specify several options for this API as either a query parameter or request body parameter. If you specify both parameters, the query parameter takes precedence.

Grid precision for geotile

For a grid_agg of geotile, you can use cells in the aggs layer as tiles for lower zoom levels. grid_precision represents the additional zoom levels available through these cells. The final precision is computed as follows: <zoom> + grid_precision. For example, if <zoom> is 7 and grid_precision is 8, then the geotile_grid aggregation will use a precision of 15. The maximum final precision is 29. The grid_precision also determines the number of cells for the grid as follows: (2^grid_precision) x (2^grid_precision). For example, a value of 8 divides the tile into a grid of 256 x 256 cells. The aggs layer only contains features for cells with matching data.

Grid precision for geohex

For a grid_agg of geohex, Elasticsearch uses <zoom> and grid_precision to calculate a final precision as follows: <zoom> + grid_precision.

This precision determines the H3 resolution of the hexagonal cells produced by the geohex aggregation. The following table maps the H3 resolution for each precision. For example, if <zoom> is 3 and grid_precision is 3, the precision is 6. At a precision of 6, hexagonal cells have an H3 resolution of 2. If <zoom> is 3 and grid_precision is 4, the precision is 7. At a precision of 7, hexagonal cells have an H3 resolution of 3.

Precision   Unique tile bins     H3 resolution   Unique hex bins    Ratio
1           4                    0               122                30.5
2           16                   0               122                7.625
3           64                   1               842                13.15625
4           256                  1               842                3.2890625
5           1024                 2               5882               5.744140625
6           4096                 2               5882               1.436035156
7           16384                3               41162              2.512329102
8           65536                3               41162              0.6280822754
9           262144               4               288122             1.099098206
10          1048576              4               288122             0.2747745514
11          4194304              5               2016842            0.4808526039
12          16777216             6               14117882           0.8414913416
13          67108864             6               14117882           0.2103728354
14          268435456            7               98825162           0.3681524172
15          1073741824           8               691776122          0.644266719
16          4294967296           8               691776122          0.1610666797
17          17179869184          9               4842432842         0.2818666889
18          68719476736          10              33897029882        0.4932667053
19          274877906944         11              237279209162       0.8632167343
20          1099511627776        11              237279209162       0.2158041836
21          4398046511104        12              1660954464122      0.3776573213
22          17592186044416       13              11626681248842     0.6609003122
23          70368744177664       13              11626681248842     0.165225078
24          281474976710656      14              81386768741882     0.2891438866
25          1125899906842620     15              569707381193162    0.5060018015
26          4503599627370500     15              569707381193162    0.1265004504
27          18014398509482000    15              569707381193162    0.03162511259
28          72057594037927900    15              569707381193162    0.007906278149
29          288230376151712000   15              569707381193162    0.001976569537

Hexagonal cells don't align perfectly on a vector tile. Some cells may intersect more than one vector tile. To compute the H3 resolution for each precision, Elasticsearch compares the average density of hexagonal bins at each resolution with the average density of tile bins at each zoom level. Elasticsearch uses the H3 resolution that is closest to the corresponding geotile density.

Learn how to use the vector tile search API with practical examples in the Vector tile search examples guide.
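A minimal sketch of calling this endpoint with the Python client. The cluster address, index name (`museums`), geo field (`location`), and tile coordinates are all placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.search_mvt(
    index="museums",  # hypothetical index with a geo_point field
    field="location",
    zoom=13,
    x=4207,
    y=2692,
    grid_agg="geotile",
    grid_precision=2,
    fields=["name"],
)
tile_bytes = resp.body  # binary Mapbox vector tile (PBF)
```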

``_ :param index: Comma-separated list of data streams, indices, or aliases to search :param field: Field containing geospatial data to return :param zoom: Zoom level for the vector tile to search :param x: X coordinate for the vector tile to search :param y: Y coordinate for the vector tile to search :param aggs: Sub-aggregations for the geotile_grid. It supports the following aggregation types: - `avg` - `boxplot` - `cardinality` - `extended stats` - `max` - `median absolute deviation` - `min` - `percentile` - `percentile-rank` - `stats` - `sum` - `value count` The aggregation names can't start with `_mvt_`. The `_mvt_` prefix is reserved for internal aggregations. :param buffer: The size, in pixels, of a clipping buffer outside the tile. This allows renderers to avoid outline artifacts from geometries that extend past the extent of the tile. :param exact_bounds: If `false`, the meta layer's feature is the bounding box of the tile. If `true`, the meta layer's feature is a bounding box resulting from a `geo_bounds` aggregation. The aggregation runs on values that intersect the `<zoom>/<x>/<y>` tile with `wrap_longitude` set to `false`. The resulting bounding box may be larger than the vector tile. :param extent: The size, in pixels, of a side of the tile. Vector tiles are square with equal sides. :param fields: The fields to return in the `hits` layer. It supports wildcards (`*`). This parameter does not support fields with array values. Fields with array values may return inconsistent results. :param grid_agg: The aggregation used to create a grid for the `field`. :param grid_precision: Additional zoom levels available through the aggs layer. For example, if `<zoom>` is `7` and `grid_precision` is `8`, you can zoom in up to level 15. Accepts 0-8. If 0, results don't include the aggs layer. :param grid_type: Determines the geometry type for features in the aggs layer. In the aggs layer, each feature represents a `geotile_grid` cell. If `grid`, each feature is a polygon of the cell's bounding box. If `point`, each feature is a Point that is the centroid of the cell. :param query: The query DSL used to filter documents for the search. :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param size: The maximum number of features to return in the hits layer. Accepts 0-10000. If 0, results don't include the hits layer. :param sort: Sort the features in the hits layer. By default, the API calculates a bounding box for each feature. It sorts features based on this box's diagonal length, from longest to shortest. :param track_total_hits: The number of hits matching the query to count accurately. If `true`, the exact number of hits is returned at the cost of some performance. If `false`, the response does not include the total number of hits matching the query. :param with_labels: If `true`, the hits and aggs layers will contain additional point features representing suggested label positions for the original features. * `Point` and `MultiPoint` features will have one of the points selected. * `Polygon` and `MultiPolygon` features will have a single point generated, either the centroid, if it is within the polygon, or another point within the polygon selected from the sorted triangle-tree. * `LineString` features will likewise provide a roughly central point selected from the triangle-tree. * The aggregation results will provide one central point for each aggregation bucket. 
All attributes from the original features will also be copied to the new label features. In addition, the new features will be distinguishable using the tag `_mvt_label_position`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if field in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'field'") if zoom in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'zoom'") if x in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'x'") if y in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'y'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "field": _quote(field), "zoom": _quote(zoom), "x": _quote(x), "y": _quote(y), } __path = f'/{__path_parts["index"]}/_mvt/{__path_parts["field"]}/{__path_parts["zoom"]}/{__path_parts["x"]}/{__path_parts["y"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if aggs is not None: __body["aggs"] = aggs if buffer is not None: __body["buffer"] = buffer if exact_bounds is not None: __body["exact_bounds"] = exact_bounds if extent is not None: __body["extent"] = extent if fields is not None: __body["fields"] = fields if grid_agg is not None: __body["grid_agg"] = grid_agg if grid_precision is not None: __body["grid_precision"] = grid_precision if grid_type is not None: __body["grid_type"] = grid_type if query is not None: __body["query"] = query if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if with_labels is not None: __body["with_labels"] = with_labels if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/vnd.mapbox-vector-tile"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_mvt", path_parts=__path_parts, ) @_rewrite_parameters() def search_shards( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the search shards.

Get the indices and shards that a search request would be run against. This information can be useful for working out issues or planning optimizations with routing and shard preferences. When filtered aliases are used, the filter is returned as part of the indices section.

If the Elasticsearch security features are enabled, you must have the view_index_metadata or manage index privilege for the target data stream, index, or alias.
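A minimal sketch of calling this endpoint with the Python client; the index name and routing value are placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.search_shards(index="my-index-000001", routing="user-1")
# Each inner list holds the copies (primary + replicas) of one shard.
for shard_group in resp["shards"]:
    for copy in shard_group:
        print(copy["index"], copy["shard"], copy["primary"])
```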

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param preference: The node or shard the operation should be performed on. It is random by default. :param routing: A custom value used to route operations to a specific shard. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_search_shards' else: __path_parts = {} __path = "/_search_shards" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if routing is not None: __query["routing"] = routing __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="search_shards", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("explain", "id", "params", "profile", "source"), ignore_deprecated_options={"params"}, ) def search_template( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, id: t.Optional[str] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, 
rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, source: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a search with a search template.
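A minimal sketch of calling this endpoint with the Python client, assuming a stored search template with the given id already exists (the index name, template id, and Mustache variables are placeholders):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.search_template(
    index="my-index",
    id="my-search-template",  # assumes this stored template exists
    params={"query_string": "hello world", "from": 0, "size": 10},
)
print(resp["hits"]["total"])
```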

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param ccs_minimize_roundtrips: If `true`, network round-trips are minimized for cross-cluster search requests. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param explain: If `true`, returns detailed information about score calculation as part of each hit. If you specify both this and the `explain` query parameter, the API uses only the query parameter. :param id: The ID of the search template to use. If no `source` is specified, this parameter is required. :param ignore_throttled: If `true`, specified concrete, expanded, or aliased indices are not included in the response when throttled. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param params: Key-value pairs used to replace Mustache variables in the template. The key is the variable name. The value is the variable value. :param preference: The node or shard the operation should be performed on. It is random by default. :param profile: If `true`, the query execution is profiled. :param rest_total_hits_as_int: If `true`, `hits.total` is rendered as an integer in the response. If `false`, it is rendered as an object. :param routing: A custom value used to route operations to a specific shard. :param scroll: Specifies how long a consistent view of the index should be maintained for scrolled search. :param search_type: The type of the search operation. :param source: An inline search template. Supports the same parameters as the search API's request body. It also supports Mustache variables. If no `id` is specified, this parameter is required. :param typed_keys: If `true`, the response prefixes aggregation and suggester names with their respective types. 
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_search/template' else: __path_parts = {} __path = "/_search/template" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if explain is not None: __body["explain"] = explain if id is not None: __body["id"] = id if params is not None: __body["params"] = params if profile is not None: __body["profile"] = profile if source is not None: __body["source"] = source __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "field", "case_insensitive", "index_filter", "search_after", "size", "string", "timeout", ), ) def terms_enum( self, *, index: str, field: t.Optional[str] = None, case_insensitive: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_filter: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, search_after: t.Optional[str] = None, size: t.Optional[int] = None, string: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get terms in an index.

Discover terms that match a partial string in an index. This API is designed for low-latency look-ups used in auto-complete scenarios.

NOTE: The terms enum API may return terms from deleted documents. Deleted documents are initially only marked as deleted. It is not until their segments are merged that documents are actually deleted. Until that happens, the terms enum API will return terms from these documents.
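A minimal sketch of an auto-complete style look-up with the Python client; the index and field names are placeholders:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.terms_enum(
    index="stackoverflow",  # hypothetical index
    field="tags",
    string="kib",  # prefix to complete
    size=10,
)
print(resp["terms"], resp["complete"])
```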

``_ :param index: A comma-separated list of data streams, indices, and index aliases to search. Wildcard (`*`) expressions are supported. To search all data streams or indices, omit this parameter or use `*` or `_all`. :param field: The field to check for matching terms. :param case_insensitive: When `true`, the provided search string is matched against index terms without case sensitivity. :param index_filter: Filter an index shard if the provided query rewrites to `match_none`. :param search_after: The string after which terms in the index should be returned. It allows for a form of pagination if the last result from one request is passed as the `search_after` parameter for a subsequent request. :param size: The number of matching terms to return. :param string: The string to match at the start of indexed terms. If it is not provided, all terms in the field are considered. NOTE: The prefix string cannot be larger than the largest possible keyword value, which is Lucene's term byte-length limit of 32766. :param timeout: The maximum length of time to spend collecting results. If the timeout is exceeded, the `complete` flag is set to `false` in the response and the results may be partial or empty. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if field is None and body is None: raise ValueError("Empty value passed for parameter 'field'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_terms_enum' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if field is not None: __body["field"] = field if case_insensitive is not None: __body["case_insensitive"] = case_insensitive if index_filter is not None: __body["index_filter"] = index_filter if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if string is not None: __body["string"] = string if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="terms_enum", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "doc", "field_statistics", "fields", "filter", "offsets", "payloads", "per_field_analyzer", "positions", "routing", "term_statistics", "version", "version_type", ), ) def termvectors( self, *, index: str, id: t.Optional[str] = None, doc: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, field_statistics: t.Optional[bool] = None, fields: t.Optional[t.Sequence[str]] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, offsets: t.Optional[bool] = None, payloads: t.Optional[bool] = None, per_field_analyzer: t.Optional[t.Mapping[str, str]] = None, positions: t.Optional[bool] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, realtime: t.Optional[bool] = None, routing: t.Optional[str] = None, 
term_statistics: t.Optional[bool] = None, version: t.Optional[int] = None, version_type: t.Optional[ t.Union[str, t.Literal["external", "external_gte", "force", "internal"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get term vector information.

Get information and statistics about terms in the fields of a particular document.

You can retrieve term vectors for documents stored in the index or for artificial documents passed in the body of the request. You can specify the fields you are interested in through the fields parameter or by adding the fields to the request body. For example:

GET /my-index-000001/_termvectors/1?fields=message
          

Fields can be specified using wildcards, similar to the multi match query.
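The Python-client equivalent of the GET example above might look as follows (the index, document id, and field are the same placeholders):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.termvectors(
    index="my-index-000001",
    id="1",
    fields=["message"],
    positions=True,  # include term positions
    offsets=True,  # include start/end offsets
)
print(list(resp["term_vectors"]["message"]["terms"]))
```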

Term vectors are real-time by default, not near real-time. This can be changed by setting the realtime parameter to false.

You can request three types of values: term information, term statistics, and field statistics. By default, all term information and field statistics are returned for all fields but term statistics are excluded.

Term information

  • term frequency in the field (always returned)
  • term positions (positions: true)
  • start and end offsets (offsets: true)
  • term payloads (payloads: true), as base64 encoded bytes

If the requested information wasn't stored in the index, it will be computed on the fly if possible. Additionally, term vectors could be computed for documents not even existing in the index, but instead provided by the user.

WARNING: Start and end offsets assume UTF-16 encoding is being used. If you want to use these offsets in order to get the original text that produced this token, you should make sure that the string you are taking a sub-string of is also encoded using UTF-16.

Behaviour

The term and field statistics are not accurate. Deleted documents are not taken into account. The information is only retrieved for the shard the requested document resides in. The term and field statistics are therefore only useful as relative measures whereas the absolute numbers have no meaning in this context. By default, when requesting term vectors of artificial documents, a shard to get the statistics from is randomly selected. Use routing only to hit a particular shard. Refer to the linked documentation for detailed examples of how to use this API.

``_ :param index: The name of the index that contains the document. :param id: A unique identifier for the document. :param doc: An artificial document (a document not present in the index) for which you want to retrieve term vectors. :param field_statistics: If `true`, the response includes: * The document count (how many documents contain this field). * The sum of document frequencies (the sum of document frequencies for all terms in this field). * The sum of total term frequencies (the sum of total term frequencies of each term in this field). :param fields: A list of fields for which to return term vectors. It supports wildcard expressions (`*`). :param filter: Filter terms based on their tf-idf scores. This could be useful in order to find out a good characteristic vector of a document. This feature works in a similar manner to the second phase of the More Like This Query. :param offsets: If `true`, the response includes term offsets. :param payloads: If `true`, the response includes term payloads. :param per_field_analyzer: Override the default per-field analyzer. This is useful in order to generate term vectors in any fashion, especially when using artificial documents. When providing an analyzer for a field that already stores term vectors, the term vectors will be regenerated. :param positions: If `true`, the response includes term positions. :param preference: The node or shard the operation should be performed on. It is random by default. :param realtime: If `true`, the request is real-time as opposed to near-real-time. :param routing: A custom value that is used to route operations to a specific shard. :param term_statistics: If `true`, the response includes: * The total term frequency (how often a term occurs in all documents). * The document frequency (the number of documents containing the current term). By default these values are not returned since term statistics can have a serious performance impact. :param version: If `true`, returns the document version as part of a hit. :param version_type: The version type. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and id not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_termvectors/{__path_parts["id"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_termvectors' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if realtime is not None: __query["realtime"] = realtime if not __body: if doc is not None: __body["doc"] = doc if field_statistics is not None: __body["field_statistics"] = field_statistics if fields is not None: __body["fields"] = fields if filter is not None: __body["filter"] = filter if offsets is not None: __body["offsets"] = offsets if payloads is not None: __body["payloads"] = payloads if per_field_analyzer is not None: __body["per_field_analyzer"] = per_field_analyzer if positions is not None: __body["positions"] = positions if routing is not None: __body["routing"] = routing if term_statistics is not None: __body["term_statistics"] = term_statistics if version is not None: __body["version"] = version if version_type is not None: __body["version_type"] = version_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="termvectors", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "detect_noop", "doc", "doc_as_upsert", "script", "scripted_upsert", "source", "upsert", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", }, ) def update( self, *, index: str, id: str, detect_noop: t.Optional[bool] = None, doc: t.Optional[t.Mapping[str, t.Any]] = None, doc_as_upsert: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, include_source_on_error: t.Optional[bool] = None, lang: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, require_alias: t.Optional[bool] = None, retry_on_conflict: t.Optional[int] = None, routing: t.Optional[str] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, scripted_upsert: t.Optional[bool] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, upsert: t.Optional[t.Mapping[str, t.Any]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> 
ObjectApiResponse[t.Any]: """ .. raw:: html

Update a document.

Update a document by running a script or passing a partial document.

If the Elasticsearch security features are enabled, you must have the index or write index privilege for the target index or index alias.

The script can update, delete, or skip modifying the document. The API also supports passing a partial document, which is merged into the existing document. To fully replace an existing document, use the index API. This operation:

  • Gets the document (collocated with the shard) from the index.
  • Runs the specified script.
  • Indexes the result.

The document must still be reindexed, but using this API removes some network roundtrips and reduces chances of version conflicts between the GET and the index operation.

The _source field must be enabled to use this API. In addition to _source, you can access the following variables through the ctx map: _index, _type, _id, _version, _routing, and _now (the current timestamp). For usage examples such as partial updates, upserts, and scripted updates, see the External documentation.
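As a sketch, here is a scripted counter increment with the Python client (the index, id, and `counter` field are placeholders; the script follows the ctx._source access pattern described above):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.update(
    index="test",  # hypothetical index
    id="1",
    script={
        "source": "ctx._source.counter += params.count",
        "lang": "painless",
        "params": {"count": 4},
    },
)
print(resp["result"])  # "updated", or "noop" if nothing changed
```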

``_ :param index: The name of the target index. By default, the index is created automatically if it doesn't exist. :param id: A unique identifier for the document to be updated. :param detect_noop: If `true`, the `result` in the response is set to `noop` (no operation) when there are no changes to the document. :param doc: A partial update to an existing document. If both `doc` and `script` are specified, `doc` is ignored. :param doc_as_upsert: If `true`, use the contents of 'doc' as the value of 'upsert'. NOTE: Using ingest pipelines with `doc_as_upsert` is not supported. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param include_source_on_error: Whether to include the document source in the error message in case of parsing errors. :param lang: The script language. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. :param require_alias: If `true`, the destination must be an index alias. :param retry_on_conflict: The number of times the operation should be retried when a conflict occurs. :param routing: A custom value used to route operations to a specific shard. :param script: The script to run to update the document. :param scripted_upsert: If `true`, run the script whether or not the document exists. :param source: If `false`, turn off source retrieval. You can also specify a comma-separated list of the fields you want to retrieve. :param source_excludes: The source fields you want to exclude. :param source_includes: The source fields you want to retrieve. :param timeout: The period to wait for the following operations: dynamic mapping updates and waiting for active shards. Elasticsearch waits for at least the timeout period before failing. The actual wait time could be longer, particularly when multiple waits occur. :param upsert: If the document does not already exist, the contents of 'upsert' are inserted as a new document. If the document exists, the 'script' is run. :param wait_for_active_shards: The number of copies of each shard that must be active before proceeding with the operation. Set to 'all' or any positive integer up to the total number of shards in the index (`number_of_replicas`+1). The default value of `1` means it waits for each primary shard to be active. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "id": _quote(id)} __path = f'/{__path_parts["index"]}/_update/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if include_source_on_error is not None: __query["include_source_on_error"] = include_source_on_error if lang is not None: __query["lang"] = lang if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if require_alias is not None: __query["require_alias"] = require_alias if retry_on_conflict is not None: __query["retry_on_conflict"] = retry_on_conflict if routing is not None: __query["routing"] = routing if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if detect_noop is not None: __body["detect_noop"] = detect_noop if doc is not None: __body["doc"] = doc if doc_as_upsert is not None: __body["doc_as_upsert"] = doc_as_upsert if script is not None: __body["script"] = script if scripted_upsert is not None: __body["scripted_upsert"] = scripted_upsert if source is not None: __body["_source"] = source if upsert is not None: __body["upsert"] = upsert __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="update", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("conflicts", "max_docs", "query", "script", "slice"), parameter_aliases={"from": "from_"}, ) def update_by_query( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, conflicts: t.Optional[t.Union[str, t.Literal["abort", "proceed"]]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, lenient: t.Optional[bool] = None, max_docs: t.Optional[int] = None, pipeline: t.Optional[str] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, refresh: t.Optional[bool] = None, request_cache: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, routing: t.Optional[str] = None, script: t.Optional[t.Mapping[str, t.Any]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], 
t.Literal[0]]] = None, scroll_size: t.Optional[int] = None, search_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, slices: t.Optional[t.Union[int, t.Union[str, t.Literal["auto"]]]] = None, sort: t.Optional[t.Sequence[str]] = None, stats: t.Optional[t.Sequence[str]] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[bool] = None, version_type: t.Optional[bool] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update documents.

Updates documents that match the specified query. If no query is specified, performs an update on every document in the data stream or index without modifying the source, which is useful for picking up mapping changes.

If the Elasticsearch security features are enabled, you must have the following index privileges for the target data stream, index, or alias:

  • read
  • index or write

You can specify the query criteria in the request URI or the request body using the same syntax as the search API.

When you submit an update by query request, Elasticsearch gets a snapshot of the data stream or index when it begins processing the request and updates matching documents using internal versioning. When the versions match, the document is updated and the version number is incremented. If a document changes between the time that the snapshot is taken and the update operation is processed, it results in a version conflict and the operation fails. You can opt to count version conflicts instead of halting and returning by setting conflicts to proceed. Note that if you opt to count version conflicts, the operation could attempt to update more documents from the source than max_docs until it has successfully updated max_docs documents or it has gone through every document in the source query.
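For example, a sketch that counts version conflicts instead of aborting (the index name and query are placeholders):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.update_by_query(
    index="my-index-000001",
    conflicts="proceed",  # count conflicts rather than failing
    query={"term": {"user.id": "kimchy"}},
)
print(resp["updated"], resp["version_conflicts"])
```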

NOTE: Documents with a version equal to 0 cannot be updated using update by query because internal versioning does not support 0 as a valid version number.

While processing an update by query request, Elasticsearch performs multiple search requests sequentially to find all of the matching documents. A bulk update request is performed for each batch of matching documents. Any query or update failures cause the update by query request to fail and the failures are shown in the response. Any update requests that completed successfully still stick; they are not rolled back.

Refreshing shards

Specifying the refresh parameter refreshes all shards once the request completes. This is different from the update API's refresh parameter, which causes only the shard that received the request to be refreshed. Unlike the update API, it does not support wait_for.

Running update by query asynchronously

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at .tasks/task/${taskId}.

Waiting for active shards

wait_for_active_shards controls how many copies of a shard must be active before proceeding with the request. See wait_for_active_shards for details. timeout controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the Bulk API. Update by query uses scrolled searches, so you can also specify the scroll parameter to control how long it keeps the search context alive, for example ?scroll=10m. The default is 5 minutes.

Throttling update requests

To control the rate at which update by query issues batches of update operations, you can set requests_per_second to any positive decimal number. This pads each batch with a wait time to throttle the rate. Set requests_per_second to -1 to turn off throttling.

Throttling uses a wait time between batches so that the internal scroll requests can be given a timeout that takes the request padding into account. The padding time is the difference between the batch size divided by the requests_per_second and the time spent writing. By default the batch size is 1000, so if requests_per_second is set to 500:

target_time = 1000 / 500 per second = 2 seconds
          wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
          

Since the batch is issued as a single _bulk request, large batch sizes cause Elasticsearch to create many requests and wait before starting the next set. This is "bursty" instead of "smooth".
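A plain-Python sketch of that padding arithmetic (illustrative only; the write time is an assumed value):

```python
batch_size = 1000  # default scroll size
requests_per_second = 500
write_time = 0.5  # assumed seconds spent writing the batch

target_time = batch_size / requests_per_second  # 2.0 seconds
wait_time = target_time - write_time  # 1.5 seconds of padding
print(target_time, wait_time)
```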

Slicing

Update by query supports sliced scroll to parallelize the update process. This can improve efficiency and provide a convenient way to break the request down into smaller parts.

Setting slices to auto chooses a reasonable number for most data streams and indices. This setting will use one slice per shard, up to a certain limit. If there are multiple source data streams or indices, it will choose the number of slices based on the index or backing index with the smallest number of shards.
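A sketch of automatic slicing combined with an asynchronous run (the index name is a placeholder; with wait_for_completion=False the response carries a task id):

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

resp = es.update_by_query(
    index="my-index-000001",
    slices="auto",  # one slice per shard, up to a limit
    conflicts="proceed",
    wait_for_completion=False,  # run as a background task
)
print(resp["task"])  # usable with the tasks API or rethrottle
```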

Adding slices to _update_by_query just automates the manual process of creating sub-requests, which means it has some quirks:

  • You can see these requests in the tasks APIs. These sub-requests are "child" tasks of the task for the request with slices.
  • Fetching the status of the task for the request with slices only contains the status of completed slices.
  • These sub-requests are individually addressable for things like cancellation and rethrottling.
  • Rethrottling the request with slices will rethrottle the unfinished sub-request proportionally.
  • Canceling the request with slices will cancel each sub-request.
  • Due to the nature of slices each sub-request won't get a perfectly even portion of the documents. All documents will be addressed, but some slices may be larger than others. Expect larger slices to have a more even distribution.
  • Parameters like requests_per_second and max_docs on a request with slices are distributed proportionally to each sub-request. Combine that with the point above about distribution being uneven and you should conclude that using max_docs with slices might not result in exactly max_docs documents being updated.
  • Each sub-request gets a slightly different snapshot of the source data stream or index though these are all taken at approximately the same time.

If you're slicing manually or otherwise tuning automatic slicing, keep in mind that:

  • Query performance is most efficient when the number of slices is equal to the number of shards in the index or backing index. If that number is large (for example, 500), choose a lower number as too many slices hurts performance. Setting slices higher than the number of shards generally does not improve efficiency and adds overhead.
  • Update performance scales linearly across available resources with the number of slices.

Whether query or update performance dominates the runtime depends on the documents being reindexed and cluster resources. Refer to the linked documentation for examples of how to update documents using the _update_by_query API:

``_ :param index: A comma-separated list of data streams, indices, and aliases to search. It supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. This parameter can be used only when the `q` query string parameter is specified. :param analyzer: The analyzer to use for the query string. This parameter can be used only when the `q` query string parameter is specified. :param conflicts: The preferred behavior when update by query hits version conflicts: `abort` or `proceed`. :param default_operator: The default operator for query string query: `AND` or `OR`. This parameter can be used only when the `q` query string parameter is specified. :param df: The field to use as default where no field prefix is given in the query string. This parameter can be used only when the `q` query string parameter is specified. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param from_: Skips the specified number of documents. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. This parameter can be used only when the `q` query string parameter is specified. :param max_docs: The maximum number of documents to update. :param pipeline: The ID of the pipeline to use to preprocess incoming documents. If the index has a default ingest pipeline specified, then setting the value to `_none` disables the default ingest pipeline for this request. If a final pipeline is configured it will always run, regardless of the value of this parameter. :param preference: The node or shard the operation should be performed on. It is random by default. :param q: A query in the Lucene query string syntax. :param query: The documents to update using the Query DSL. :param refresh: If `true`, Elasticsearch refreshes affected shards to make the operation visible to search after the request completes. This is different than the update API's `refresh` parameter, which causes just the shard that received the request to be refreshed. :param request_cache: If `true`, the request cache is used for this request. It defaults to the index-level setting. :param requests_per_second: The throttle for this request in sub-requests per second. :param routing: A custom value used to route operations to a specific shard. :param script: The script to run to update the document source or metadata when updating. :param scroll: The period to retain the search context for scrolling. :param scroll_size: The size of the scroll request that powers the operation. :param search_timeout: An explicit timeout for each search request. By default, there is no timeout. :param search_type: The type of the search operation. Available options include `query_then_fetch` and `dfs_query_then_fetch`. 
:param slice: Slice the request manually using the provided slice ID and total number of slices. :param slices: The number of slices this task should be divided into. :param sort: A comma-separated list of `<field>:<direction>` pairs. :param stats: The specific `tag` of the request for logging and statistical purposes. :param terminate_after: The maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. IMPORTANT: Use with caution. Elasticsearch applies this parameter to each shard handling the request. When possible, let Elasticsearch perform early termination automatically. Avoid specifying this parameter for requests that target data streams with backing indices across multiple data tiers. :param timeout: The period each update request waits for the following operations: dynamic mapping updates, waiting for active shards. By default, it is one minute. This guarantees Elasticsearch waits for at least the timeout before failing. The actual wait time could be longer, particularly when multiple waits occur. :param version: If `true`, returns the document version as part of a hit. :param version_type: Whether the document should increment the version number on hit (`internal`) or not (`reindex`). :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). The `timeout` parameter controls how long each write request waits for unavailable shards to become available. Both work exactly the way they work in the bulk API. :param wait_for_completion: If `true`, the request blocks until the operation is complete. If `false`, Elasticsearch performs some preflight checks, launches the request, and returns a task ID that you can use to cancel or get the status of the task. Elasticsearch creates a record of this task as a document at `.tasks/task/${taskId}`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_update_by_query' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. 
if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if pipeline is not None: __query["pipeline"] = pipeline if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if refresh is not None: __query["refresh"] = refresh if request_cache is not None: __query["request_cache"] = request_cache if requests_per_second is not None: __query["requests_per_second"] = requests_per_second if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if scroll_size is not None: __query["scroll_size"] = scroll_size if search_timeout is not None: __query["search_timeout"] = search_timeout if search_type is not None: __query["search_type"] = search_type if slices is not None: __query["slices"] = slices if sort is not None: __query["sort"] = sort if stats is not None: __query["stats"] = stats if terminate_after is not None: __query["terminate_after"] = terminate_after if timeout is not None: __query["timeout"] = timeout if version is not None: __query["version"] = version if version_type is not None: __query["version_type"] = version_type if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if conflicts is not None: __body["conflicts"] = conflicts if max_docs is not None: __body["max_docs"] = max_docs if query is not None: __body["query"] = query if script is not None: __body["script"] = script if slice is not None: __body["slice"] = slice if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="update_by_query", path_parts=__path_parts, ) @_rewrite_parameters() def update_by_query_rethrottle( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Throttle an update by query operation.

Change the number of requests per second for a particular update by query operation. Rethrottling that speeds up the query takes effect immediately, but rethrottling that slows down the query takes effect after completing the current batch to prevent scroll timeouts.
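A minimal sketch with the Python client; the task id is a placeholder for one returned by an earlier asynchronous update by query:

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # placeholder address

es.update_by_query_rethrottle(
    task_id="oTUltX4IQMOUUVeiohTt8A:12345",  # placeholder task id
    requests_per_second=-1,  # -1 turns throttling off
)
```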

``_ :param task_id: The ID for the task. :param requests_per_second: The throttle for this request in sub-requests per second. To turn off throttling, set it to `-1`. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_update_by_query/{__path_parts["task_id"]}/_rethrottle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if requests_per_second is not None: __query["requests_per_second"] = requests_per_second __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="update_by_query_rethrottle", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/_base.py000066400000000000000000000361001506101734100244770ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import re import warnings from typing import ( Any, Callable, Collection, Dict, Iterable, List, Mapping, Optional, Tuple, Union, ) from elastic_transport import ( ApiResponse, BinaryApiResponse, HeadApiResponse, HttpHeaders, ListApiResponse, NodeConfig, ObjectApiResponse, OpenTelemetrySpan, SniffOptions, TextApiResponse, Transport, ) from elastic_transport.client_utils import DEFAULT, DefaultType from ..._otel import OpenTelemetry from ..._version import __versionstr__ from ...compat import warn_stacklevel from ...exceptions import ( HTTP_EXCEPTIONS, ApiError, ConnectionError, ElasticsearchWarning, SerializationError, UnsupportedProductError, ) from .utils import _TYPE_SYNC_SNIFF_CALLBACK, _base64_auth_header, _quote_query _WARNING_RE = re.compile(r"\"([^\"]*)\"") _COMPAT_MIMETYPE_TEMPLATE = "application/vnd.elasticsearch+%s; compatible-with=" + str( __versionstr__.partition(".")[0] ) _COMPAT_MIMETYPE_RE = re.compile(r"application/(json|x-ndjson|vnd\.mapbox-vector-tile)") _COMPAT_MIMETYPE_SUB = _COMPAT_MIMETYPE_TEMPLATE % (r"\g<1>",) def resolve_auth_headers( headers: Optional[Mapping[str, str]], http_auth: Union[DefaultType, None, Tuple[str, str], str] = DEFAULT, api_key: Union[DefaultType, None, Tuple[str, str], str] = DEFAULT, basic_auth: Union[DefaultType, None, Tuple[str, str], str] = DEFAULT, bearer_auth: Union[DefaultType, None, str] = DEFAULT, ) -> HttpHeaders: if headers is None: headers = HttpHeaders() elif not isinstance(headers, HttpHeaders): headers = HttpHeaders(headers) resolved_http_auth = http_auth if http_auth is not DEFAULT else None resolved_basic_auth = basic_auth if basic_auth is not DEFAULT else None if resolved_http_auth is not None: if resolved_basic_auth is not None: raise ValueError( "Can't specify both 'http_auth' and 'basic_auth', " "instead only specify 'basic_auth'" ) if isinstance(http_auth, str) or ( isinstance(resolved_http_auth, (list, tuple)) and all(isinstance(x, str) for x in resolved_http_auth) ): resolved_basic_auth = resolved_http_auth else: raise TypeError( "The deprecated 'http_auth' parameter must be either 'Tuple[str, str]' or 'str'. " "Use either the 'basic_auth' parameter instead" ) warnings.warn( "The 'http_auth' parameter is deprecated. 
" "Use 'basic_auth' or 'bearer_auth' parameters instead", category=DeprecationWarning, stacklevel=warn_stacklevel(), ) resolved_api_key = api_key if api_key is not DEFAULT else None resolved_bearer_auth = bearer_auth if bearer_auth is not DEFAULT else None if resolved_api_key or resolved_basic_auth or resolved_bearer_auth: if ( sum( x is not None for x in ( resolved_api_key, resolved_basic_auth, resolved_bearer_auth, ) ) > 1 ): raise ValueError( "Can only set one of 'api_key', 'basic_auth', and 'bearer_auth'" ) if headers and headers.get("authorization", None) is not None: raise ValueError( "Can't set 'Authorization' HTTP header with other authentication options" ) if resolved_api_key: headers["authorization"] = f"ApiKey {_base64_auth_header(resolved_api_key)}" if resolved_basic_auth: headers["authorization"] = ( f"Basic {_base64_auth_header(resolved_basic_auth)}" ) if resolved_bearer_auth: headers["authorization"] = f"Bearer {resolved_bearer_auth}" return headers def create_sniff_callback( host_info_callback: Optional[ Callable[[Dict[str, Any], Dict[str, Any]], Optional[Dict[str, Any]]] ] = None, sniffed_node_callback: Optional[ Callable[[Dict[str, Any], NodeConfig], Optional[NodeConfig]] ] = None, ) -> _TYPE_SYNC_SNIFF_CALLBACK: assert (host_info_callback is None) != (sniffed_node_callback is None) # Wrap the deprecated 'host_info_callback' into 'sniffed_node_callback' if host_info_callback is not None: def _sniffed_node_callback( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: assert host_info_callback is not None if ( host_info_callback( # type ignore[misc] node_info, {"host": node_config.host, "port": node_config.port} ) is None ): return None return node_config sniffed_node_callback = _sniffed_node_callback def sniff_callback( transport: Transport, sniff_options: SniffOptions ) -> List[NodeConfig]: for _ in transport.node_pool.all(): try: meta, node_infos = transport.perform_request( "GET", "/_nodes/_all/http", headers={ "accept": "application/vnd.elasticsearch+json; compatible-with=9" }, request_timeout=( sniff_options.sniff_timeout if not sniff_options.is_initial_sniff else None ), ) except (SerializationError, ConnectionError): continue if not 200 <= meta.status <= 299: continue node_configs = [] for node_info in node_infos.get("nodes", {}).values(): address = node_info.get("http", {}).get("publish_address") if not address or ":" not in address: continue if "/" in address: # Support 7.x host/ip:port behavior where http.publish_host has been set. fqdn, ipaddress = address.split("/", 1) host = fqdn _, port_str = ipaddress.rsplit(":", 1) port = int(port_str) else: host, port_str = address.rsplit(":", 1) port = int(port_str) assert sniffed_node_callback is not None sniffed_node = sniffed_node_callback( node_info, meta.node.replace(host=host, port=port) ) if sniffed_node is None: continue # Use the node which was able to make the request as a base. 
node_configs.append(sniffed_node) if node_configs: return node_configs return [] return sniff_callback def _default_sniffed_node_callback( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: if node_info.get("roles", []) == ["master"]: return None return node_config default_sniff_callback = create_sniff_callback( sniffed_node_callback=_default_sniffed_node_callback ) class BaseClient: def __init__(self, _transport: Transport) -> None: self._transport = _transport self._client_meta: Union[DefaultType, Tuple[Tuple[str, str], ...]] = DEFAULT self._headers = HttpHeaders() self._request_timeout: Union[DefaultType, Optional[float]] = DEFAULT self._ignore_status: Union[DefaultType, Collection[int]] = DEFAULT self._max_retries: Union[DefaultType, int] = DEFAULT self._retry_on_timeout: Union[DefaultType, bool] = DEFAULT self._retry_on_status: Union[DefaultType, Collection[int]] = DEFAULT self._verified_elasticsearch = False self._otel = OpenTelemetry() @property def transport(self) -> Transport: return self._transport def perform_request( self, method: str, path: str, *, params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, endpoint_id: Optional[str] = None, path_parts: Optional[Mapping[str, Any]] = None, ) -> ApiResponse[Any]: with self._otel.span( method, endpoint_id=endpoint_id, path_parts=path_parts or {}, ) as otel_span: response = self._perform_request( method, path, params=params, headers=headers, body=body, otel_span=otel_span, ) otel_span.set_elastic_cloud_metadata(response.meta.headers) return response def _perform_request( self, method: str, path: str, *, params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, otel_span: OpenTelemetrySpan, ) -> ApiResponse[Any]: if headers: request_headers = self._headers.copy() request_headers.update(headers) else: request_headers = self._headers def mimetype_header_to_compat(header: str) -> None: # Converts all parts of a Accept/Content-Type headers # from application/X -> application/vnd.elasticsearch+X mimetype = request_headers.get(header, None) if mimetype: request_headers[header] = _COMPAT_MIMETYPE_RE.sub( _COMPAT_MIMETYPE_SUB, mimetype ) mimetype_header_to_compat("Accept") mimetype_header_to_compat("Content-Type") if params: target = f"{path}?{_quote_query(params)}" else: target = path meta, resp_body = self.transport.perform_request( method, target, headers=request_headers, body=body, request_timeout=self._request_timeout, max_retries=self._max_retries, retry_on_status=self._retry_on_status, retry_on_timeout=self._retry_on_timeout, client_meta=self._client_meta, otel_span=otel_span, ) # HEAD with a 404 is returned as a normal response # since this is used as an 'exists' functionality. if not (method == "HEAD" and meta.status == 404) and ( not 200 <= meta.status < 299 and ( self._ignore_status is DEFAULT or self._ignore_status is None or meta.status not in self._ignore_status ) ): message = str(resp_body) # If the response is an error response try parsing # the raw Elasticsearch error before raising. if isinstance(resp_body, dict): try: error = resp_body.get("error", message) if isinstance(error, dict) and "type" in error: error = error["type"] message = error except (ValueError, KeyError, TypeError): pass raise HTTP_EXCEPTIONS.get(meta.status, ApiError)( message=message, meta=meta, body=resp_body ) # 'X-Elastic-Product: Elasticsearch' should be on every 2XX response. 
if not self._verified_elasticsearch: # If the header is set we mark the server as verified. if meta.headers.get("x-elastic-product", "") == "Elasticsearch": self._verified_elasticsearch = True # Otherwise we only raise an error on 2XX responses. elif meta.status >= 200 and meta.status < 300: raise UnsupportedProductError( message=( "The client noticed that the server is not Elasticsearch " "and we do not support this unknown product" ), meta=meta, body=resp_body, ) # 'Warning' headers should be reraised as 'ElasticsearchWarning' if "warning" in meta.headers: warning_header = (meta.headers.get("warning") or "").strip() warning_messages: Iterable[str] = _WARNING_RE.findall(warning_header) or ( warning_header, ) stacklevel = warn_stacklevel() for warning_message in warning_messages: warnings.warn( warning_message, category=ElasticsearchWarning, stacklevel=stacklevel, ) if method == "HEAD": response = HeadApiResponse(meta=meta) elif isinstance(resp_body, dict): response = ObjectApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] elif isinstance(resp_body, list): response = ListApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] elif isinstance(resp_body, str): response = TextApiResponse( # type: ignore[assignment] body=resp_body, meta=meta, ) elif isinstance(resp_body, bytes): response = BinaryApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] else: response = ApiResponse(body=resp_body, meta=meta) # type: ignore[assignment] return response class NamespacedClient(BaseClient): def __init__(self, client: "BaseClient") -> None: self._client = client super().__init__(self._client.transport) def perform_request( self, method: str, path: str, *, params: Optional[Mapping[str, Any]] = None, headers: Optional[Mapping[str, str]] = None, body: Optional[Any] = None, endpoint_id: Optional[str] = None, path_parts: Optional[Mapping[str, Any]] = None, ) -> ApiResponse[Any]: # Use the internal clients .perform_request() implementation # so we take advantage of their transport options. return self._client.perform_request( method, path, params=params, headers=headers, body=body, endpoint_id=endpoint_id, path_parts=path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/async_search.py000066400000000000000000000735141506101734100261020ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class AsyncSearchClient(NamespacedClient): @_rewrite_parameters() def delete( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async search.

If the asynchronous search is still running, it is cancelled. Otherwise, the saved search results are deleted. If the Elasticsearch security features are enabled, the deletion of a specific async search is restricted to: the authenticated user that submitted the original search request; users that have the cancel_task cluster privilege.
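For illustration, a minimal call sketch (not part of the generated reference; it assumes a connected `Elasticsearch` client named `client` and a placeholder search identifier)::

    # client = Elasticsearch("https://localhost:9200", ...)  # assumed to exist
    # Cancel the search if it is still running, or delete its stored results.
    resp = client.async_search.delete(id="my-async-search-id")
    print(resp)  # {'acknowledged': True} once cancelled or deleted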

``_ :param id: A unique identifier for the async search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_async_search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="async_search.delete", path_parts=__path_parts, ) @_rewrite_parameters() def get( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, typed_keys: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async search results.

Retrieve the results of a previously submitted asynchronous search request. If the Elasticsearch security features are enabled, access to the results of a specific async search is restricted to the user or API key that submitted it.
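A minimal retrieval sketch (assuming a connected `client` and a placeholder identifier returned by an earlier submit call)::

    resp = client.async_search.get(id="my-async-search-id", keep_alive="5m")
    if not resp["is_running"]:
        # once complete, the body mirrors a regular search response
        hits = resp["response"]["hits"]["hits"]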

``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search should be available in the cluster. When not specified, the `keep_alive` set with the corresponding submit async request will be used. Otherwise, it is possible to override the value and extend the validity of the request. When this period expires, the search, if still running, is cancelled. If the search is completed, its saved results are deleted. :param typed_keys: Specify whether aggregation and suggester names should be prefixed by their respective types in the response :param wait_for_completion_timeout: Specifies to wait for the search to be completed up until the provided timeout. Final results will be returned if available before the timeout expires, otherwise the currently available results will be returned once the timeout expires. By default no timeout is set meaning that the currently available results will be returned without any additional wait. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_async_search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="async_search.get", path_parts=__path_parts, ) @_rewrite_parameters() def status( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the async search status.

Get the status of a previously submitted async search request given its identifier, without retrieving search results. If the Elasticsearch security features are enabled, access to the status of a specific async search is restricted to:

  • The user or API key that submitted the original async search request.
  • Users that have the monitor cluster privilege or greater privileges.
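For example, polling the status without pulling results down (placeholder identifier, `client` assumed connected)::

    status = client.async_search.status(id="my-async-search-id")
    if not status["is_running"]:
        print(status["completion_status"])  # HTTP status code of the completed search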
``_ :param id: A unique identifier for the async search. :param keep_alive: The length of time that the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_async_search/status/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="async_search.status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "collapse", "docvalue_fields", "explain", "ext", "fields", "from_", "highlight", "indices_boost", "knn", "min_score", "pit", "post_filter", "profile", "query", "rescore", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", "from": "from_", }, ) def submit( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, batched_reduce_size: t.Optional[int] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, collapse: t.Optional[t.Mapping[str, t.Any]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, ext: t.Optional[t.Mapping[str, t.Any]] = None, fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, highlight: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, knn: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, q: t.Optional[str] = None, query: 
t.Optional[t.Mapping[str, t.Any]] = None, request_cache: t.Optional[bool] = None, rescore: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, seq_no_primary_term: t.Optional[bool] = None, size: t.Optional[int] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stats: t.Optional[t.Sequence[str]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, suggest: t.Optional[t.Mapping[str, t.Any]] = None, suggest_field: t.Optional[str] = None, suggest_mode: t.Optional[ t.Union[str, t.Literal["always", "missing", "popular"]] ] = None, suggest_size: t.Optional[int] = None, suggest_text: t.Optional[str] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[str] = None, track_scores: t.Optional[bool] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, typed_keys: t.Optional[bool] = None, version: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an async search.

When the primary sort of the results is an indexed field, shards are sorted by the minimum and maximum values they hold for that field. Partial results become available following the requested sort criteria.

Warning: Asynchronous search does not support scroll or search requests that include only the suggest section.

By default, Elasticsearch does not allow you to store an async search response larger than 10MB; an attempt to do so results in an error. The maximum allowed size for a stored async search response can be changed with the `search.max_async_search_response_size` cluster-level setting.
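Putting the main knobs together, a hedged sketch of a typical submit call (the index name, query, and timeouts are placeholders; `client` is assumed connected)::

    resp = client.async_search.submit(
        index="my-index",
        query={"match": {"message": "error"}},
        wait_for_completion_timeout="2s",  # block briefly before going async
        keep_on_completion=True,           # store results even if it finishes in time
        keep_alive="10m",                  # how long stored results stay retrievable
    )
    search_id = resp["id"]  # present here because keep_on_completion=True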

``_ :param index: A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices :param aggregations: :param aggs: :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param allow_partial_search_results: Indicate if an error should be returned if there is a partial search failure or timeout :param analyze_wildcard: Specify whether wildcard and prefix queries should be analyzed (default: false) :param analyzer: The analyzer to use for the query string :param batched_reduce_size: Affects how often partial results become available, which happens whenever shard results are reduced. A partial reduction is performed every time the coordinating node has received a certain number of new shard responses (5 by default). :param ccs_minimize_roundtrips: The default value is the only supported value. :param collapse: :param default_operator: The default operator for query string query (AND or OR) :param df: The field to use as default where no field prefix is given in the query string :param docvalue_fields: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. :param expand_wildcards: Whether to expand wildcard expressions to concrete indices that are open, closed or both. :param explain: If true, returns detailed information about score computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. :param fields: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param highlight: :param ignore_throttled: Whether specified concrete, expanded or aliased indices should be ignored when throttled :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param indices_boost: Boosts the _score of documents from specified indices. :param keep_alive: Specifies how long the async search needs to be available. Ongoing async searches and any saved search results are deleted after this period. :param keep_on_completion: If `true`, results are stored for later retrieval when the search completes within the `wait_for_completion_timeout`. :param knn: Defines the approximate kNN search to run. :param lenient: Specify whether format-based query failures (such as providing text to a numeric field) should be ignored :param max_concurrent_shard_requests: The number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the impact of the search on the cluster in order to limit the number of concurrent shard requests :param min_score: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an `<index>` in the request path.
:param post_filter: :param preference: Specify the node or shard the operation should be performed on (default: random) :param profile: :param q: Query in the Lucene query string syntax :param query: Defines the search definition using the Query DSL. :param request_cache: Specify if request cache should be used for this request or not, defaults to true :param rescore: :param rest_total_hits_as_int: Indicates whether hits.total should be rendered as an integer or an object in the rest search response :param routing: A comma-separated list of specific routing values :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. :param search_after: :param search_type: Search operation type :param seq_no_primary_term: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. :param size: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param slice: :param sort: :param source: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. :param source_excludes: A list of fields to exclude from the returned _source field :param source_includes: A list of fields to extract and return from the _source field :param stats: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. :param stored_fields: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. :param suggest: :param suggest_field: Specifies which field to use for suggestions. :param suggest_mode: Specify suggest mode :param suggest_size: How many suggestions to return in response :param suggest_text: The source text for which the suggestions should be returned. :param terminate_after: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. :param timeout: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param track_scores: If true, calculate and return document scores, even if the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. :param typed_keys: Specify whether aggregation and suggester names should be prefixed by their respective types in the response :param version: If true, returns document version as part of a hit. :param wait_for_completion_timeout: Blocks and waits until the search is completed up to a certain timeout. 
When the async search completes within the timeout, the response won’t include the ID as the results are not stored in the cluster. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_async_search' else: __path_parts = {} __path = "/_async_search" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if batched_reduce_size is not None: __query["batched_reduce_size"] = batched_reduce_size if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if keep_alive is not None: __query["keep_alive"] = keep_alive if keep_on_completion is not None: __query["keep_on_completion"] = keep_on_completion if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if request_cache is not None: __query["request_cache"] = request_cache if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if suggest_field is not None: __query["suggest_field"] = suggest_field if suggest_mode is not None: __query["suggest_mode"] = suggest_mode if suggest_size is not None: __query["suggest_size"] = suggest_size if suggest_text is not None: __query["suggest_text"] = suggest_text if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if collapse is not None: __body["collapse"] = collapse if docvalue_fields is not None: __body["docvalue_fields"] = docvalue_fields if explain is not None: __body["explain"] = explain if ext is not None: __body["ext"] = ext if fields is not None: __body["fields"] = fields if from_ is 
not None: __body["from"] = from_ if highlight is not None: __body["highlight"] = highlight if indices_boost is not None: __body["indices_boost"] = indices_boost if knn is not None: __body["knn"] = knn if min_score is not None: __body["min_score"] = min_score if pit is not None: __body["pit"] = pit if post_filter is not None: __body["post_filter"] = post_filter if profile is not None: __body["profile"] = profile if query is not None: __body["query"] = query if rescore is not None: __body["rescore"] = rescore if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if search_after is not None: __body["search_after"] = search_after if seq_no_primary_term is not None: __body["seq_no_primary_term"] = seq_no_primary_term if size is not None: __body["size"] = size if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort if source is not None: __body["_source"] = source if stats is not None: __body["stats"] = stats if stored_fields is not None: __body["stored_fields"] = stored_fields if suggest is not None: __body["suggest"] = suggest if terminate_after is not None: __body["terminate_after"] = terminate_after if timeout is not None: __body["timeout"] = timeout if track_scores is not None: __body["track_scores"] = track_scores if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="async_search.submit", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/autoscaling.py000066400000000000000000000257171506101734100257530ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class AutoscalingClient(NamespacedClient): @_rewrite_parameters() def delete_autoscaling_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an autoscaling policy.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_autoscaling/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="autoscaling.delete_autoscaling_policy", path_parts=__path_parts, ) @_rewrite_parameters() def get_autoscaling_capacity( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the autoscaling capacity.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

This API gets the current autoscaling capacity based on the configured autoscaling policy. It returns the information needed to size the cluster appropriately for the current workload.

The required_capacity is calculated as the maximum of the required_capacity results of all individual deciders that are enabled for the policy.

The operator should verify that the current_nodes match the operator’s knowledge of the cluster to avoid making autoscaling decisions based on stale or incomplete information.

The response contains decider-specific information you can use to diagnose how and why autoscaling determined a certain capacity was required. This information is provided for diagnosis only. Do not use this information to make autoscaling decisions.
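A small sketch of reading the reported capacity (the response shape follows the documented policies/required_capacity layout; `client` is an assumed connected instance)::

    capacity = client.autoscaling.get_autoscaling_capacity()
    for name, policy in capacity["policies"].items():
        # total required storage/memory across nodes for this policy
        print(name, policy["required_capacity"]["total"])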

``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_autoscaling/capacity" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="autoscaling.get_autoscaling_capacity", path_parts=__path_parts, ) @_rewrite_parameters() def get_autoscaling_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get an autoscaling policy.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

``_ :param name: the name of the autoscaling policy :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_autoscaling/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="autoscaling.get_autoscaling_policy", path_parts=__path_parts, ) @_rewrite_parameters( body_name="policy", ) def put_autoscaling_policy( self, *, name: str, policy: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an autoscaling policy.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.
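A minimal sketch of creating a policy (the policy name is a placeholder and the `fixed` decider is used for illustration)::

    client.autoscaling.put_autoscaling_policy(
        name="my_autoscaling_policy",
        policy={"roles": ["data_hot"], "deciders": {"fixed": {}}},
    )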

``_ :param name: the name of the autoscaling policy :param policy: :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if policy is None and body is None: raise ValueError( "Empty value passed for parameters 'policy' and 'body', one of them should be set." ) elif policy is not None and body is not None: raise ValueError("Cannot set both 'policy' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_autoscaling/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = policy if policy is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="autoscaling.put_autoscaling_policy", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/cat.py000066400000000000000000005330751506101734100242120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class CatClient(NamespacedClient): @_rewrite_parameters() def aliases( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "alias", "filter", "index", "is_write_index", "routing.index", "routing.search", ], ] ], t.Union[ str, t.Literal[ "alias", "filter", "index", "is_write_index", "routing.index", "routing.search", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get aliases.

Get the cluster's index aliases, including filter and routing information. This API does not return data stream aliases.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or the Kibana console. They are not intended for use by applications. For application consumption, use the aliases API.
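For ad-hoc inspection, compare the text and structured forms (assuming a connected `client`)::

    print(client.cat.aliases(v=True))         # text table with column headers
    rows = client.cat.aliases(format="json")  # parsed rows for ad-hoc scripting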

``_ :param name: A comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicated that the request should never timeout, you can set it to `-1`. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_cat/aliases/{__path_parts["name"]}' else: __path_parts = {} __path = "/_cat/aliases" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.aliases", path_parts=__path_parts, ) @_rewrite_parameters() def allocation( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "disk.avail", "disk.indices", "disk.indices.forecast", "disk.percent", "disk.total", "disk.used", "host", "ip", "node", "node.role", "shards", "shards.undesired", "write_load.forecast", ], ] ], t.Union[ str, t.Literal[ "disk.avail", "disk.indices", "disk.indices.forecast", "disk.percent", "disk.total", "disk.used", "host", "ip", "node", "node.role", "shards", "shards.undesired", "write_load.forecast", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get shard allocation information.

Get a snapshot of the number of shards allocated to each data node and how much disk space they are using.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.
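For example, with byte units and verbose headers (flags as described in the parameters below)::

    print(client.cat.allocation(bytes="gb", v=True))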

``_ :param node_id: A comma-separated list of node identifiers or names used to limit the returned information. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_cat/allocation/{__path_parts["node_id"]}' else: __path_parts = {} __path = "/_cat/allocation" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.allocation", path_parts=__path_parts, ) @_rewrite_parameters() def component_templates( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "alias_count", "included_in", "mapping_count", "metadata_count", "name", "settings_count", "version", ], ] ], t.Union[ str, t.Literal[ "alias_count", "included_in", "mapping_count", "metadata_count", "name", "settings_count", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get component templates.

Get information about component templates in a cluster. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get component template API.
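A quick look-up sketch (the wildcard pattern is a placeholder)::

    rows = client.cat.component_templates(name="my-template-*", format="json")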

``_ :param name: The name of the component template. It accepts wildcard expressions. If it is omitted, all component templates are returned. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: The period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_cat/component_templates/{__path_parts["name"]}' else: __path_parts = {} __path = "/_cat/component_templates" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.component_templates", path_parts=__path_parts, ) @_rewrite_parameters() def count( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["count", "epoch", "timestamp"]]], t.Union[str, t.Literal["count", "epoch", "timestamp"]], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get a document count.

Get quick access to a document count for a data stream, an index, or an entire cluster. The document count only includes live documents, not deleted documents that have not yet been removed by the merge process.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the count API.
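For example (the index name is a placeholder; omit `index` for a cluster-wide count)::

    print(client.cat.count(index="my-index", v=True))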

``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. It supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/count/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/count" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.count", path_parts=__path_parts, ) @_rewrite_parameters() def fielddata( self, *, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]] ], t.Union[str, t.Literal["field", "host", "id", "ip", "node", "size"]], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get field data cache information.

Get the amount of heap memory currently used by the field data cache on every data node in the cluster.

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes stats API.

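A minimal sketch; the cluster address and the `body.keyword` field are placeholders, and the field only shows usage once field data has actually been loaded::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Heap used by the field data cache for one field, per node, in kilobytes
    print(client.cat.fielddata(fields="body.keyword", bytes="kb", v=True))
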
``_ :param fields: Comma-separated list of fields used to limit returned information. To retrieve all fields, omit this parameter. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if fields not in SKIP_IN_PATH: __path_parts = {"fields": _quote(fields)} __path = f'/_cat/fielddata/{__path_parts["fields"]}' else: __path_parts = {} __path = "/_cat/fielddata" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.fielddata", path_parts=__path_parts, ) @_rewrite_parameters() def health( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "active_shards_percent", "cluster", "epoch", "init", "max_task_wait_time", "node.data", "node.total", "pending_tasks", "pri", "relo", "shards", "status", "timestamp", "unassign", "unassign.pri", ], ] ], t.Union[ str, t.Literal[ "active_shards_percent", "cluster", "epoch", "init", "max_task_wait_time", "node.data", "node.total", "pending_tasks", "pri", "relo", "shards", "status", "timestamp", "unassign", "unassign.pri", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, ts: t.Optional[bool] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get the cluster health status.

IMPORTANT: CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the cluster health API.

This API is often used to check malfunctioning clusters. To help you track cluster health alongside log files and alerting systems, the API returns timestamps in two formats: HH:MM:SS, which is human-readable but includes no date information, and Unix epoch time, which is machine-sortable and includes date information. The latter format is useful for cluster recoveries that take multiple days.

You can use the cat health API to verify cluster health across multiple nodes. You can also use the API to track the recovery of a large cluster over a longer period of time.

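A minimal sketch against a placeholder local cluster::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Verbose text output including both HH:MM:SS and Unix epoch timestamps
    print(client.cat.health(v=True, ts=True))
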
``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param ts: If true, returns `HH:MM:SS` and Unix epoch timestamps. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/health" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if ts is not None: __query["ts"] = ts if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.health", path_parts=__path_parts, ) @_rewrite_parameters() def help(self) -> TextApiResponse: """ .. raw:: html

Get CAT help.

Get help for the CAT APIs.

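A minimal sketch (placeholder cluster address)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Plain-text list of the available cat endpoints
    print(client.cat.help())
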
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_cat" __query: t.Dict[str, t.Any] = {} __headers = {"accept": "text/plain"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.help", path_parts=__path_parts, ) @_rewrite_parameters() def indices( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "bulk.avg_size_in_bytes", "bulk.avg_time", "bulk.total_operations", "bulk.total_size_in_bytes", "bulk.total_time", "completion.size", "creation.date", "creation.date.string", "dataset.size", "dense_vector.value_count", "docs.count", "docs.deleted", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "health", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "memory.total", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "pri", "pri.bulk.avg_size_in_bytes", "pri.bulk.avg_time", "pri.bulk.total_operations", "pri.bulk.total_size_in_bytes", "pri.bulk.total_time", "pri.completion.size", "pri.dense_vector.value_count", "pri.fielddata.evictions", "pri.fielddata.memory_size", "pri.flush.total", "pri.flush.total_time", "pri.get.current", "pri.get.exists_time", "pri.get.exists_total", "pri.get.missing_time", "pri.get.missing_total", "pri.get.time", "pri.get.total", "pri.indexing.delete_current", "pri.indexing.delete_time", "pri.indexing.delete_total", "pri.indexing.index_current", "pri.indexing.index_failed", "pri.indexing.index_failed_due_to_version_conflict", "pri.indexing.index_time", "pri.indexing.index_total", "pri.memory.total", "pri.merges.current", "pri.merges.current_docs", "pri.merges.current_size", "pri.merges.total", "pri.merges.total_docs", "pri.merges.total_size", "pri.merges.total_time", "pri.query_cache.evictions", "pri.query_cache.memory_size", "pri.refresh.external_time", "pri.refresh.external_total", "pri.refresh.listeners", "pri.refresh.time", "pri.refresh.total", "pri.request_cache.evictions", "pri.request_cache.hit_count", "pri.request_cache.memory_size", "pri.request_cache.miss_count", "pri.search.fetch_current", "pri.search.fetch_time", "pri.search.fetch_total", "pri.search.open_contexts", "pri.search.query_current", "pri.search.query_time", "pri.search.query_total", "pri.search.scroll_current", "pri.search.scroll_time", "pri.search.scroll_total", "pri.segments.count", "pri.segments.fixed_bitset_memory", "pri.segments.index_writer_memory", "pri.segments.memory", "pri.segments.version_map_memory", "pri.sparse_vector.value_count", "pri.store.size", "pri.suggest.current", "pri.suggest.time", "pri.suggest.total", "pri.warmer.current", "pri.warmer.total", "pri.warmer.total_time", 
"query_cache.evictions", "query_cache.memory_size", "refresh.external_time", "refresh.external_total", "refresh.listeners", "refresh.time", "refresh.total", "rep", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "sparse_vector.value_count", "status", "store.size", "suggest.current", "suggest.time", "suggest.total", "uuid", "warmer.current", "warmer.total", "warmer.total_time", ], ] ], t.Union[ str, t.Literal[ "bulk.avg_size_in_bytes", "bulk.avg_time", "bulk.total_operations", "bulk.total_size_in_bytes", "bulk.total_time", "completion.size", "creation.date", "creation.date.string", "dataset.size", "dense_vector.value_count", "docs.count", "docs.deleted", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "health", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "memory.total", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "pri", "pri.bulk.avg_size_in_bytes", "pri.bulk.avg_time", "pri.bulk.total_operations", "pri.bulk.total_size_in_bytes", "pri.bulk.total_time", "pri.completion.size", "pri.dense_vector.value_count", "pri.fielddata.evictions", "pri.fielddata.memory_size", "pri.flush.total", "pri.flush.total_time", "pri.get.current", "pri.get.exists_time", "pri.get.exists_total", "pri.get.missing_time", "pri.get.missing_total", "pri.get.time", "pri.get.total", "pri.indexing.delete_current", "pri.indexing.delete_time", "pri.indexing.delete_total", "pri.indexing.index_current", "pri.indexing.index_failed", "pri.indexing.index_failed_due_to_version_conflict", "pri.indexing.index_time", "pri.indexing.index_total", "pri.memory.total", "pri.merges.current", "pri.merges.current_docs", "pri.merges.current_size", "pri.merges.total", "pri.merges.total_docs", "pri.merges.total_size", "pri.merges.total_time", "pri.query_cache.evictions", "pri.query_cache.memory_size", "pri.refresh.external_time", "pri.refresh.external_total", "pri.refresh.listeners", "pri.refresh.time", "pri.refresh.total", "pri.request_cache.evictions", "pri.request_cache.hit_count", "pri.request_cache.memory_size", "pri.request_cache.miss_count", "pri.search.fetch_current", "pri.search.fetch_time", "pri.search.fetch_total", "pri.search.open_contexts", "pri.search.query_current", "pri.search.query_time", "pri.search.query_total", "pri.search.scroll_current", "pri.search.scroll_time", "pri.search.scroll_total", "pri.segments.count", "pri.segments.fixed_bitset_memory", "pri.segments.index_writer_memory", "pri.segments.memory", "pri.segments.version_map_memory", "pri.sparse_vector.value_count", "pri.store.size", "pri.suggest.current", "pri.suggest.time", "pri.suggest.total", "pri.warmer.current", "pri.warmer.total", "pri.warmer.total_time", "query_cache.evictions", "query_cache.memory_size", 
"refresh.external_time", "refresh.external_total", "refresh.listeners", "refresh.time", "refresh.total", "rep", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "sparse_vector.value_count", "status", "store.size", "suggest.current", "suggest.time", "suggest.total", "uuid", "warmer.current", "warmer.total", "warmer.total_time", ], ], ] ] = None, health: t.Optional[ t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, pri: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get index information.

Get high-level information about indices in a cluster, including backing indices for data streams.

Use this request to get the following information for each index in a cluster:

  • shard count
  • document count
  • deleted document count
  • primary store size
  • total store size of all shards, including shard replicas

These metrics are retrieved directly from Lucene, which Elasticsearch uses internally to power indexing and search. As a result, all document counts include hidden nested documents. To get an accurate count of Elasticsearch documents, use the cat count or count APIs.

CAT APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use an index endpoint.

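A minimal sketch; the wildcard pattern `my-*` is a placeholder::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Matching indices sorted by store size, largest first, in megabytes
    print(client.cat.indices(index="my-*", bytes="mb", s="store.size:desc", v=True))
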
``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param bytes: The unit used to display byte values. :param expand_wildcards: The type of index that wildcard patterns can match. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param health: The health status used to limit returned indices. By default, the response includes indices of any health status. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. :param master_timeout: Period to wait for a connection to the master node. :param pri: If true, the response only includes information from primary shards. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/indices/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/indices" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if health is not None: __query["health"] = health if help is not None: __query["help"] = help if human is not None: __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if pri is not None: __query["pri"] = pri if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.indices", path_parts=__path_parts, ) @_rewrite_parameters() def master( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["host", "id", "ip", "node"]]], t.Union[str, t.Literal["host", "id", "ip", "node"]], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get master node information.

Get information about the master node, including the ID, bound IP address, and name.

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

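A minimal sketch; only the cluster address is assumed::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Single row with the elected master's ID, bound address, and name
    print(client.cat.master(v=True))
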
``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/master" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.master", path_parts=__path_parts, ) @_rewrite_parameters() def ml_data_frame_analytics( self, *, id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "create_time", "description", "dest_index", "failure_reason", "id", "model_memory_limit", "node.address", "node.ephemeral_id", "node.id", "node.name", "progress", "source_index", "state", "type", "version", ], ], ] ] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) 
-> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get data frame analytics jobs.

Get configuration and usage information about data frame analytics jobs.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get data frame analytics jobs statistics API.

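A minimal sketch; it assumes a cluster with ML features available::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # All data frame analytics jobs as JSON; an empty list rather than an error when none match
    resp = client.cat.ml_data_frame_analytics(allow_no_match=True, format="json")
    print(resp)
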
``_ :param id: The ID of the data frame analytics to fetch :param allow_no_match: Whether to ignore if a wildcard expression matches no configs. (This includes `_all` string or when no configs have been specified) :param bytes: The unit in which to display byte values :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_cat/ml/data_frame/analytics/{__path_parts["id"]}' else: __path_parts = {} __path = "/_cat/ml/data_frame/analytics" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters() def ml_datafeeds( self, *, datafeed_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ] ], t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ] ], t.Union[ str, t.Literal[ "ae", "bc", "id", "na", "ne", "ni", "nn", "s", "sba", "sc", "seah", "st", ], ], ] ] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get datafeeds.

Get configuration and usage information about datafeeds. This API returns a maximum of 10,000 datafeeds. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get datafeed statistics API.

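A minimal sketch, assuming ML is enabled on the target cluster::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # All datafeeds, verbose; an empty response rather than a 404 when none exist
    print(client.cat.ml_datafeeds(allow_no_match=True, v=True))
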
``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. :param allow_no_match: Specifies what to do when the request: * Contains wildcard expressions and there are no datafeeds that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty datafeeds array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_cat/ml/datafeeds/{__path_parts["datafeed_id"]}' else: __path_parts = {} __path = "/_cat/ml/datafeeds" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_datafeeds", path_parts=__path_parts, ) @_rewrite_parameters() def ml_jobs( self, *, job_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", "data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", 
"model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", "data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", "model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", "data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", "model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ] ], t.Union[ str, t.Literal[ "assignment_explanation", "buckets.count", "buckets.time.exp_avg", "buckets.time.exp_avg_hour", "buckets.time.max", "buckets.time.min", "buckets.time.total", "data.buckets", "data.earliest_record", "data.empty_buckets", 
"data.input_bytes", "data.input_fields", "data.input_records", "data.invalid_dates", "data.last", "data.last_empty_bucket", "data.last_sparse_bucket", "data.latest_record", "data.missing_fields", "data.out_of_order_timestamps", "data.processed_fields", "data.processed_records", "data.sparse_buckets", "forecasts.memory.avg", "forecasts.memory.max", "forecasts.memory.min", "forecasts.memory.total", "forecasts.records.avg", "forecasts.records.max", "forecasts.records.min", "forecasts.records.total", "forecasts.time.avg", "forecasts.time.max", "forecasts.time.min", "forecasts.time.total", "forecasts.total", "id", "model.bucket_allocation_failures", "model.by_fields", "model.bytes", "model.bytes_exceeded", "model.categorization_status", "model.categorized_doc_count", "model.dead_category_count", "model.failed_category_count", "model.frequent_category_count", "model.log_time", "model.memory_limit", "model.memory_status", "model.over_fields", "model.partition_fields", "model.rare_category_count", "model.timestamp", "model.total_category_count", "node.address", "node.ephemeral_id", "node.id", "node.name", "opened_time", "state", ], ], ] ] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get anomaly detection jobs.

Get configuration and usage information for anomaly detection jobs. This API returns a maximum of 10,000 jobs. If the Elasticsearch security features are enabled, you must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster privileges to use this API.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get anomaly detection job statistics API.

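A minimal sketch; the selected columns are illustrative::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Job ID, state, and processed record count for every anomaly detection job
    print(client.cat.ml_jobs(h=["id", "state", "data.processed_records"], v=True))
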
``_ :param job_id: Identifier for the anomaly detection job. :param allow_no_match: Specifies what to do when the request: * Contains wildcard expressions and there are no jobs that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_cat/ml/anomaly_detectors/{__path_parts["job_id"]}' else: __path_parts = {} __path = "/_cat/ml/anomaly_detectors" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_jobs", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def ml_trained_models( self, *, model_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, from_: t.Optional[int] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ] ], t.Union[ str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ] ], t.Union[ str, 
t.Literal[ "create_time", "created_by", "data_frame_analytics_id", "description", "heap_size", "id", "ingest.count", "ingest.current", "ingest.failed", "ingest.pipelines", "ingest.time", "license", "operations", "version", ], ], ] ] = None, size: t.Optional[int] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get trained models.

Get configuration and usage information about inference trained models.

IMPORTANT: CAT APIs are only intended for human consumption using the Kibana console or command line. They are not intended for use by applications. For application consumption, use the get trained models statistics API.

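A minimal sketch; the pagination values are illustrative::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # First page of trained models, 50 per page
    print(client.cat.ml_trained_models(from_=0, size=50, v=True))
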
``_ :param model_id: A unique identifier for the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no models that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a 404 status code when there are no matches or only partial matches. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param from_: Skips the specified number of transforms. :param h: A comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: A comma-separated list of column names or aliases used to sort the response. :param size: The maximum number of transforms to display. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if model_id not in SKIP_IN_PATH: __path_parts = {"model_id": _quote(model_id)} __path = f'/_cat/ml/trained_models/{__path_parts["model_id"]}' else: __path_parts = {} __path = "/_cat/ml/trained_models" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if from_ is not None: __query["from"] = from_ if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.ml_trained_models", path_parts=__path_parts, ) @_rewrite_parameters() def nodeattrs( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "attr", "host", "id", "ip", "node", "pid", "port", "value" ], ] ], t.Union[ str, t.Literal[ "attr", "host", "id", "ip", "node", "pid", "port", "value" ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get node attribute information.

Get information about custom node attributes.

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

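A minimal sketch (placeholder cluster address)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Which custom attribute/value pairs are set on which nodes
    print(client.cat.nodeattrs(h=["node", "attr", "value"], v=True))
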
``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/nodeattrs" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.nodeattrs", path_parts=__path_parts, ) @_rewrite_parameters() def nodes( self, *, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, full_id: t.Optional[t.Union[bool, str]] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "build", "completion.size", "cpu", "disk.avail", "disk.total", "disk.used", "disk.used_percent", "fielddata.evictions", "fielddata.memory_size", "file_desc.current", "file_desc.max", "file_desc.percent", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "heap.current", "heap.max", "heap.percent", "http_address", "id", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "jdk", "load_15m", "load_1m", "load_5m", "mappings.total_count", "mappings.total_estimated_overhead_in_bytes", "master", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "name", "node.role", "pid", "port", "query_cache.evictions", "query_cache.hit_count", "query_cache.memory_size", "query_cache.miss_count", "ram.current", "ram.max", "ram.percent", "refresh.time", "refresh.total", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "script.cache_evictions", "script.compilations", "search.fetch_current", "search.fetch_time", 
"search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "shard_stats.total_count", "suggest.current", "suggest.time", "suggest.total", "uptime", "version", ], ] ], t.Union[ str, t.Literal[ "build", "completion.size", "cpu", "disk.avail", "disk.total", "disk.used", "disk.used_percent", "fielddata.evictions", "fielddata.memory_size", "file_desc.current", "file_desc.max", "file_desc.percent", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "heap.current", "heap.max", "heap.percent", "http_address", "id", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "jdk", "load_15m", "load_1m", "load_5m", "mappings.total_count", "mappings.total_estimated_overhead_in_bytes", "master", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "name", "node.role", "pid", "port", "query_cache.evictions", "query_cache.hit_count", "query_cache.memory_size", "query_cache.miss_count", "ram.current", "ram.max", "ram.percent", "refresh.time", "refresh.total", "request_cache.evictions", "request_cache.hit_count", "request_cache.memory_size", "request_cache.miss_count", "script.cache_evictions", "script.compilations", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "shard_stats.total_count", "suggest.current", "suggest.time", "suggest.total", "uptime", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get node information.

Get information about the nodes in a cluster.

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

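A minimal sketch; the column selection is illustrative::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Name, roles, heap usage, and CPU per node, with full node IDs
    print(client.cat.nodes(h=["name", "node.role", "heap.percent", "cpu"], full_id=True, v=True))
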
``_ :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param full_id: If `true`, return the full node ID. If `false`, return the shortened node ID. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. :param master_timeout: The period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/nodes" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if full_id is not None: __query["full_id"] = full_id if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.nodes", path_parts=__path_parts, ) @_rewrite_parameters() def pending_tasks( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal["insertOrder", "priority", "source", "timeInQueue"], ] ], t.Union[ str, t.Literal["insertOrder", "priority", "source", "timeInQueue"] ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get pending task information.

Get information about cluster-level changes that have not yet taken effect.

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the pending cluster tasks API.

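A minimal sketch (placeholder cluster address)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Cluster-state changes still waiting to execute, with queue time in milliseconds
    print(client.cat.pending_tasks(time="ms", v=True))
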
``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/pending_tasks" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.pending_tasks", path_parts=__path_parts, ) @_rewrite_parameters() def plugins( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal["component", "description", "id", "name", "version"], ] ], t.Union[ str, t.Literal["component", "description", "id", "name", "version"] ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, include_bootstrap: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get plugin information.

Get a list of plugins running on each node of a cluster.

IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

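A minimal sketch (placeholder cluster address)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")
    # Plugins installed on each node, including bootstrap plugins
    print(client.cat.plugins(include_bootstrap=True, v=True))
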
``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param include_bootstrap: Include bootstrap plugins in the response :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/plugins" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if include_bootstrap is not None: __query["include_bootstrap"] = include_bootstrap if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.plugins", path_parts=__path_parts, ) @_rewrite_parameters() def recovery( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, active_only: t.Optional[bool] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "bytes", "bytes_percent", "bytes_recovered", "bytes_total", "files", "files_percent", "files_recovered", "files_total", "index", "repository", "shard", "snapshot", "source_host", "source_node", "stage", "start_time", "start_time_millis", "stop_time", "stop_time_millis", "target_host", "target_node", "time", "translog_ops", "translog_ops_percent", "translog_ops_recovered", "type", ], ] ], t.Union[ str, t.Literal[ "bytes", "bytes_percent", "bytes_recovered", "bytes_total", "files", "files_percent", "files_recovered", "files_total", "index", "repository", "shard", "snapshot", "source_host", "source_node", "stage", "start_time", "start_time_millis", "stop_time", "stop_time_millis", "target_host", "target_node", "time", "translog_ops", "translog_ops_percent", "translog_ops_recovered", "type", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> 
t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get shard recovery information.

Get information about ongoing and completed shard recoveries. Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or syncing a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing. For data streams, the API returns information about the stream’s backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index recovery API.

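Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical index `my-index`)::

    resp = client.cat.recovery(index="my-index", active_only=False, v=True)
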
``_

:param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`.
:param active_only: If `true`, the response only includes ongoing shard recoveries.
:param bytes: The unit used to display byte values.
:param detailed: If `true`, the response includes detailed information about shard recoveries.
:param format: Specifies the format to return the columnar data in; can be set to `text`, `json`, `cbor`, `yaml`, or `smile`.
:param h: A comma-separated list of column names to display. It supports simple wildcards.
:param help: When set to `true` will output available columns. This option can't be combined with any other query string option.
:param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name.
:param time: The unit used to display time values.
:param v: When set to `true` will enable verbose output.
        """
        __path_parts: t.Dict[str, str]
        if index not in SKIP_IN_PATH:
            __path_parts = {"index": _quote(index)}
            __path = f'/_cat/recovery/{__path_parts["index"]}'
        else:
            __path_parts = {}
            __path = "/_cat/recovery"
        __query: t.Dict[str, t.Any] = {}
        if active_only is not None:
            __query["active_only"] = active_only
        if bytes is not None:
            __query["bytes"] = bytes
        if detailed is not None:
            __query["detailed"] = detailed
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if format is not None:
            __query["format"] = format
        if h is not None:
            __query["h"] = h
        if help is not None:
            __query["help"] = help
        if human is not None:
            __query["human"] = human
        if pretty is not None:
            __query["pretty"] = pretty
        if s is not None:
            __query["s"] = s
        if time is not None:
            __query["time"] = time
        if v is not None:
            __query["v"] = v
        __headers = {"accept": "text/plain,application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "GET",
            __path,
            params=__query,
            headers=__headers,
            endpoint_id="cat.recovery",
            path_parts=__path_parts,
        )

    @_rewrite_parameters()
    def repositories(
        self,
        *,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        format: t.Optional[str] = None,
        h: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        help: t.Optional[bool] = None,
        human: t.Optional[bool] = None,
        local: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        pretty: t.Optional[bool] = None,
        s: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        v: t.Optional[bool] = None,
    ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]:
        """
        .. raw:: html

Get snapshot repository information.

Get a list of snapshot repositories for a cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot repository API.

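Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client`)::

    resp = client.cat.repositories(v=True)
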
``_ :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/repositories" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.repositories", path_parts=__path_parts, ) @_rewrite_parameters() def segments( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "committed", "compound", "docs.count", "docs.deleted", "generation", "id", "index", "ip", "prirep", "searchable", "segment", "shard", "size", "size.memory", "version", ], ] ], t.Union[ str, t.Literal[ "committed", "compound", "docs.count", "docs.deleted", "generation", "id", "index", "ip", "prirep", "searchable", "segment", "shard", "size", "size.memory", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get segment information.

Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the index segments API.

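Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical index `my-index`)::

    resp = client.cat.segments(index="my-index", bytes="mb", v=True)
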
``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/segments/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/segments" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.segments", path_parts=__path_parts, ) @_rewrite_parameters() def shards( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, bytes: t.Optional[ t.Union[str, t.Literal["b", "gb", "kb", "mb", "pb", "tb"]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "completion.size", "dataset.size", "dense_vector.value_count", "docs", "dsparse_vector.value_count", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "id", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "node", "prirep", "query_cache.evictions", "query_cache.memory_size", "recoverysource.type", "refresh.time", "refresh.total", "search.fetch_current", "search.fetch_time", 
"search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "seq_no.global_checkpoint", "seq_no.local_checkpoint", "seq_no.max", "shard", "state", "store", "suggest.current", "suggest.time", "suggest.total", "sync_id", "unassigned.at", "unassigned.details", "unassigned.for", "unassigned.reason", ], ] ], t.Union[ str, t.Literal[ "completion.size", "dataset.size", "dense_vector.value_count", "docs", "dsparse_vector.value_count", "fielddata.evictions", "fielddata.memory_size", "flush.total", "flush.total_time", "get.current", "get.exists_time", "get.exists_total", "get.missing_time", "get.missing_total", "get.time", "get.total", "id", "index", "indexing.delete_current", "indexing.delete_time", "indexing.delete_total", "indexing.index_current", "indexing.index_failed", "indexing.index_failed_due_to_version_conflict", "indexing.index_time", "indexing.index_total", "ip", "merges.current", "merges.current_docs", "merges.current_size", "merges.total", "merges.total_docs", "merges.total_size", "merges.total_time", "node", "prirep", "query_cache.evictions", "query_cache.memory_size", "recoverysource.type", "refresh.time", "refresh.total", "search.fetch_current", "search.fetch_time", "search.fetch_total", "search.open_contexts", "search.query_current", "search.query_time", "search.query_total", "search.scroll_current", "search.scroll_time", "search.scroll_total", "segments.count", "segments.fixed_bitset_memory", "segments.index_writer_memory", "segments.memory", "segments.version_map_memory", "seq_no.global_checkpoint", "seq_no.local_checkpoint", "seq_no.max", "shard", "state", "store", "suggest.current", "suggest.time", "suggest.total", "sync_id", "unassigned.at", "unassigned.details", "unassigned.for", "unassigned.reason", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get shard information.

Get information about the shards in a cluster. For data streams, the API returns information about the backing indices. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications.

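Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical index `my-index`)::

    resp = client.cat.shards(index="my-index", h="index,shard,prirep,state", s="state")
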
``_ :param index: A comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param bytes: The unit used to display byte values. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param master_timeout: The period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cat/shards/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cat/shards" __query: t.Dict[str, t.Any] = {} if bytes is not None: __query["bytes"] = bytes if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.shards", path_parts=__path_parts, ) @_rewrite_parameters() def snapshots( self, *, repository: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "duration", "end_epoch", "end_time", "failed_shards", "id", "indices", "reason", "repository", "start_epoch", "start_time", "status", "successful_shards", "total_shards", ], ] ], t.Union[ str, t.Literal[ "duration", "end_epoch", "end_time", "failed_shards", "id", "indices", "reason", "repository", "start_epoch", "start_time", "status", "successful_shards", "total_shards", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get snapshot information.

Get information about the snapshots stored in one or more repositories. A snapshot is a backup of an index or running Elasticsearch cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get snapshot API.

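Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical repository `my-repository`)::

    resp = client.cat.snapshots(repository="my-repository", v=True)
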
``_ :param repository: A comma-separated list of snapshot repositories used to limit the request. Accepts wildcard expressions. `_all` returns all repositories. If any repository fails during the request, Elasticsearch returns an error. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param ignore_unavailable: If `true`, the response does not include information from unavailable snapshots. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: Unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if repository not in SKIP_IN_PATH: __path_parts = {"repository": _quote(repository)} __path = f'/_cat/snapshots/{__path_parts["repository"]}' else: __path_parts = {} __path = "/_cat/snapshots" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.snapshots", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def tasks( self, *, actions: t.Optional[t.Sequence[str]] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "action", "id", "ip", "node", "node_id", "parent_task_id", "port", "running_time", "running_time_ns", "start_time", "task_id", "timestamp", "type", "version", "x_opaque_id", ], ] ], t.Union[ str, t.Literal[ "action", "id", "ip", "node", "node_id", "parent_task_id", "port", "running_time", "running_time_ns", "start_time", "task_id", "timestamp", "type", "version", "x_opaque_id", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, v: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get task information.

Get information about tasks currently running in the cluster. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the task management API.

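Example (illustrative sketch; note this endpoint is flagged experimental above; assumes a configured `Elasticsearch` client named `client`)::

    resp = client.cat.tasks(detailed=True, v=True)
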
``_ :param actions: The task action names, which are used to limit the response. :param detailed: If `true`, the response includes detailed information about shard recoveries. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param nodes: Unique node identifiers, which are used to limit the response. :param parent_task_id: The parent task identifier, which is used to limit the response. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: Unit used to display time values. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param v: When set to `true` will enable verbose output. :param wait_for_completion: If `true`, the request blocks until the task has completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_cat/tasks" __query: t.Dict[str, t.Any] = {} if actions is not None: __query["actions"] = actions if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if timeout is not None: __query["timeout"] = timeout if v is not None: __query["v"] = v if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.tasks", path_parts=__path_parts, ) @_rewrite_parameters() def templates( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "composed_of", "index_patterns", "name", "order", "version" ], ] ], t.Union[ str, t.Literal[ "composed_of", "index_patterns", "name", "order", "version" ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get index template information.

Get information about the index templates in a cluster. You can use index templates to apply index settings and field mappings to new indices at creation. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get index template API.

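Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical template pattern)::

    resp = client.cat.templates(name="my-template-*", v=True)
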
``_ :param name: The name of the template to return. Accepts wildcard expressions. If omitted, all templates are returned. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: A comma-separated list of columns names to display. It supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: Period to wait for a connection to the master node. :param s: List of columns that determine how the table should be sorted. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_cat/templates/{__path_parts["name"]}' else: __path_parts = {} __path = "/_cat/templates" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.templates", path_parts=__path_parts, ) @_rewrite_parameters() def thread_pool( self, *, thread_pool_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "active", "completed", "core", "ephemeral_id", "host", "ip", "keep_alive", "largest", "max", "name", "node_id", "node_name", "pid", "pool_size", "port", "queue", "queue_size", "rejected", "size", "type", ], ] ], t.Union[ str, t.Literal[ "active", "completed", "core", "ephemeral_id", "host", "ip", "keep_alive", "largest", "max", "name", "node_id", "node_name", "pid", "pool_size", "port", "queue", "queue_size", "rejected", "size", "type", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, s: t.Optional[t.Union[str, t.Sequence[str]]] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get thread pool statistics.

Get thread pool statistics for each node in a cluster. Returned information includes all built-in thread pools and custom thread pools. IMPORTANT: cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the nodes info API.

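Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client`)::

    resp = client.cat.thread_pool(thread_pool_patterns="write,search", v=True)
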
``_ :param thread_pool_patterns: A comma-separated list of thread pool names used to limit the request. Accepts wildcard expressions. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param h: List of columns to appear in the response. Supports simple wildcards. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param local: If `true`, the request computes the list of selected nodes from the local cluster state. If `false` the list of selected nodes are computed from the cluster state of the master node. In both cases the coordinating node will send requests for further information to each selected node. :param master_timeout: The period to wait for a connection to the master node. :param s: A comma-separated list of column names or aliases that determines the sort order. Sorting defaults to ascending and can be changed by setting `:asc` or `:desc` as a suffix to the column name. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if thread_pool_patterns not in SKIP_IN_PATH: __path_parts = {"thread_pool_patterns": _quote(thread_pool_patterns)} __path = f'/_cat/thread_pool/{__path_parts["thread_pool_patterns"]}' else: __path_parts = {} __path = "/_cat/thread_pool" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.thread_pool", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def transforms( self, *, transform_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, from_: t.Optional[int] = None, h: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ] ], t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", 
"id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ], ] ] = None, help: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, s: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ] ], t.Union[ str, t.Literal[ "changes_last_detection_time", "checkpoint", "checkpoint_duration_time_exp_avg", "checkpoint_progress", "create_time", "delete_time", "description", "dest_index", "docs_per_second", "documents_deleted", "documents_indexed", "documents_processed", "frequency", "id", "index_failure", "index_time", "index_total", "indexed_documents_exp_avg", "last_search_time", "max_page_search_size", "pages_processed", "pipeline", "processed_documents_exp_avg", "processing_time", "reason", "search_failure", "search_time", "search_total", "source_index", "state", "transform_type", "trigger_count", "version", ], ], ] ] = None, size: t.Optional[int] = None, time: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, v: t.Optional[bool] = None, ) -> t.Union[ObjectApiResponse[t.Any], TextApiResponse]: """ .. raw:: html

Get transform information.

Get configuration and usage information about transforms.

cat APIs are only intended for human consumption using the command line or Kibana console. They are not intended for use by applications. For application consumption, use the get transform statistics API.

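Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client`)::

    resp = client.cat.transforms(size=10, v=True)
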
``_ :param transform_id: A transform identifier or a wildcard expression. If you do not specify one of these options, the API returns information for all transforms. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If `true`, it returns an empty transforms array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. :param format: Specifies the format to return the columnar data in, can be set to `text`, `json`, `cbor`, `yaml`, or `smile`. :param from_: Skips the specified number of transforms. :param h: Comma-separated list of column names to display. :param help: When set to `true` will output available columns. This option can't be combined with any other query string option. :param s: Comma-separated list of column names or column aliases used to sort the response. :param size: The maximum number of transforms to obtain. :param time: The unit used to display time values. :param v: When set to `true` will enable verbose output. """ __path_parts: t.Dict[str, str] if transform_id not in SKIP_IN_PATH: __path_parts = {"transform_id": _quote(transform_id)} __path = f'/_cat/transforms/{__path_parts["transform_id"]}' else: __path_parts = {} __path = "/_cat/transforms" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if from_ is not None: __query["from"] = from_ if h is not None: __query["h"] = h if help is not None: __query["help"] = help if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if s is not None: __query["s"] = s if size is not None: __query["size"] = size if time is not None: __query["time"] = time if v is not None: __query["v"] = v __headers = {"accept": "text/plain,application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cat.transforms", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/ccr.py000066400000000000000000001411071506101734100242010ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t

from elastic_transport import ObjectApiResponse

from ._base import NamespacedClient
from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters


class CcrClient(NamespacedClient):

    @_rewrite_parameters()
    def delete_auto_follow_pattern(
        self,
        *,
        name: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        pretty: t.Optional[bool] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Delete auto-follow patterns.

Delete a collection of cross-cluster replication auto-follow patterns.

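Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical pattern collection name)::

    resp = client.ccr.delete_auto_follow_pattern(name="my-auto-follow-pattern")
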
``_

:param name: The auto-follow pattern collection to delete.
:param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
        """
        if name in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'name'")
        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
        __path = f'/_ccr/auto_follow/{__path_parts["name"]}'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if master_timeout is not None:
            __query["master_timeout"] = master_timeout
        if pretty is not None:
            __query["pretty"] = pretty
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "DELETE",
            __path,
            params=__query,
            headers=__headers,
            endpoint_id="ccr.delete_auto_follow_pattern",
            path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=(
            "leader_index",
            "remote_cluster",
            "data_stream_name",
            "max_outstanding_read_requests",
            "max_outstanding_write_requests",
            "max_read_request_operation_count",
            "max_read_request_size",
            "max_retry_delay",
            "max_write_buffer_count",
            "max_write_buffer_size",
            "max_write_request_operation_count",
            "max_write_request_size",
            "read_poll_timeout",
            "settings",
        ),
    )
    def follow(
        self,
        *,
        index: str,
        leader_index: t.Optional[str] = None,
        remote_cluster: t.Optional[str] = None,
        data_stream_name: t.Optional[str] = None,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        max_outstanding_read_requests: t.Optional[int] = None,
        max_outstanding_write_requests: t.Optional[int] = None,
        max_read_request_operation_count: t.Optional[int] = None,
        max_read_request_size: t.Optional[t.Union[int, str]] = None,
        max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        max_write_buffer_count: t.Optional[int] = None,
        max_write_buffer_size: t.Optional[t.Union[int, str]] = None,
        max_write_request_operation_count: t.Optional[int] = None,
        max_write_request_size: t.Optional[t.Union[int, str]] = None,
        pretty: t.Optional[bool] = None,
        read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        settings: t.Optional[t.Mapping[str, t.Any]] = None,
        wait_for_active_shards: t.Optional[
            t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]]
        ] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Create a follower. Create a cross-cluster replication follower index that follows a specific leader index. When the API returns, the follower index exists and cross-cluster replication starts replicating operations from the leader index to the follower index.

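Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client`, a remote cluster alias `remote-cluster`, and hypothetical index names)::

    resp = client.ccr.follow(
        index="follower-index",
        leader_index="leader-index",
        remote_cluster="remote-cluster",
        wait_for_active_shards=1,
    )
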
``_

:param index: The name of the follower index.
:param leader_index: The name of the index in the leader cluster to follow.
:param remote_cluster: The remote cluster containing the leader index.
:param data_stream_name: If the leader index is part of a data stream, the name to which the local data stream for the followed index should be renamed.
:param master_timeout: Period to wait for a connection to the master node.
:param max_outstanding_read_requests: The maximum number of outstanding read requests from the remote cluster.
:param max_outstanding_write_requests: The maximum number of outstanding write requests on the follower.
:param max_read_request_operation_count: The maximum number of operations to pull per read from the remote cluster.
:param max_read_request_size: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
:param max_retry_delay: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
:param max_write_buffer_count: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
:param max_write_buffer_size: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
:param max_write_request_operation_count: The maximum number of operations per bulk write request executed on the follower.
:param max_write_request_size: The maximum total bytes of operations per bulk write request executed on the follower.
:param read_poll_timeout: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
:param settings: Settings to override from the leader index.
:param wait_for_active_shards: Specifies the number of shards to wait on being active before responding. This defaults to waiting on none of the shards to be active. A shard must be restored from the leader index before being active. Restoring a follower shard requires transferring all the remote Lucene segment files to the follower index.
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if leader_index is None and body is None: raise ValueError("Empty value passed for parameter 'leader_index'") if remote_cluster is None and body is None: raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/follow' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if leader_index is not None: __body["leader_index"] = leader_index if remote_cluster is not None: __body["remote_cluster"] = remote_cluster if data_stream_name is not None: __body["data_stream_name"] = data_stream_name if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: __body["max_outstanding_write_requests"] = ( max_outstanding_write_requests ) if max_read_request_operation_count is not None: __body["max_read_request_operation_count"] = ( max_read_request_operation_count ) if max_read_request_size is not None: __body["max_read_request_size"] = max_read_request_size if max_retry_delay is not None: __body["max_retry_delay"] = max_retry_delay if max_write_buffer_count is not None: __body["max_write_buffer_count"] = max_write_buffer_count if max_write_buffer_size is not None: __body["max_write_buffer_size"] = max_write_buffer_size if max_write_request_operation_count is not None: __body["max_write_request_operation_count"] = ( max_write_request_operation_count ) if max_write_request_size is not None: __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout if settings is not None: __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.follow", path_parts=__path_parts, ) @_rewrite_parameters() def follow_info( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get follower information.

Get information about all cross-cluster replication follower indices. For example, the results include follower index names, leader index names, replication options, and whether the follower indices are active or paused.

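Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical follower index)::

    resp = client.ccr.follow_info(index="follower-index")
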
``_

:param index: A comma-delimited list of follower index patterns.
:param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
        __path = f'/{__path_parts["index"]}/_ccr/info'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if master_timeout is not None:
            __query["master_timeout"] = master_timeout
        if pretty is not None:
            __query["pretty"] = pretty
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "GET",
            __path,
            params=__query,
            headers=__headers,
            endpoint_id="ccr.follow_info",
            path_parts=__path_parts,
        )

    @_rewrite_parameters()
    def follow_stats(
        self,
        *,
        index: t.Union[str, t.Sequence[str]],
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        pretty: t.Optional[bool] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Get follower stats.

Get cross-cluster replication follower stats. The API returns shard-level stats about the "following tasks" associated with each shard for the specified indices.

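Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical follower index)::

    resp = client.ccr.follow_stats(index="follower-index")
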
``_

:param index: A comma-delimited list of index patterns.
:param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
        """
        if index in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'index'")
        __path_parts: t.Dict[str, str] = {"index": _quote(index)}
        __path = f'/{__path_parts["index"]}/_ccr/stats'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if pretty is not None:
            __query["pretty"] = pretty
        if timeout is not None:
            __query["timeout"] = timeout
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "GET",
            __path,
            params=__query,
            headers=__headers,
            endpoint_id="ccr.follow_stats",
            path_parts=__path_parts,
        )

    @_rewrite_parameters(
        body_fields=(
            "follower_cluster",
            "follower_index",
            "follower_index_uuid",
            "leader_remote_cluster",
        ),
    )
    def forget_follower(
        self,
        *,
        index: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        follower_cluster: t.Optional[str] = None,
        follower_index: t.Optional[str] = None,
        follower_index_uuid: t.Optional[str] = None,
        human: t.Optional[bool] = None,
        leader_remote_cluster: t.Optional[str] = None,
        pretty: t.Optional[bool] = None,
        timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        body: t.Optional[t.Dict[str, t.Any]] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Forget a follower. Remove the cross-cluster replication follower retention leases from the leader.

A following index takes out retention leases on its leader index. These leases are used to increase the likelihood that the shards of the leader index retain the history of operations that the shards of the following index need in order to run replication. When a follower index is converted to a regular index by the unfollow API (either by directly calling the API or by index lifecycle management tasks), these leases are removed. However, removal of the leases can fail, for example when the remote cluster containing the leader index is unavailable. While the leases will eventually expire on their own, their extended existence can cause the leader index to hold more history than necessary and prevent index lifecycle management from performing some operations on the leader index. This API exists to enable manually removing the leases when the unfollow API is unable to do so.

NOTE: This API does not stop replication by a following index. If you use this API with a follower index that is still actively following, the following index will add back retention leases on the leader. The only purpose of this API is to handle the case of failure to remove the following retention leases after the unfollow API is invoked.

``_ :param index: the name of the leader index for which specified follower retention leases should be removed :param follower_cluster: :param follower_index: :param follower_index_uuid: :param leader_remote_cluster: :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/forget_follower' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if follower_cluster is not None: __body["follower_cluster"] = follower_cluster if follower_index is not None: __body["follower_index"] = follower_index if follower_index_uuid is not None: __body["follower_index_uuid"] = follower_index_uuid if leader_remote_cluster is not None: __body["leader_remote_cluster"] = leader_remote_cluster __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.forget_follower", path_parts=__path_parts, ) @_rewrite_parameters() def get_auto_follow_pattern( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get auto-follow patterns.

Get cross-cluster replication auto-follow patterns.

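Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client`)::

    resp = client.ccr.get_auto_follow_pattern()  # omit `name` to list all collections
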
``_

:param name: The auto-follow pattern collection that you want to retrieve. If you do not specify a name, the API returns information for all collections.
:param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
        """
        __path_parts: t.Dict[str, str]
        if name not in SKIP_IN_PATH:
            __path_parts = {"name": _quote(name)}
            __path = f'/_ccr/auto_follow/{__path_parts["name"]}'
        else:
            __path_parts = {}
            __path = "/_ccr/auto_follow"
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if master_timeout is not None:
            __query["master_timeout"] = master_timeout
        if pretty is not None:
            __query["pretty"] = pretty
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "GET",
            __path,
            params=__query,
            headers=__headers,
            endpoint_id="ccr.get_auto_follow_pattern",
            path_parts=__path_parts,
        )

    @_rewrite_parameters()
    def pause_auto_follow_pattern(
        self,
        *,
        name: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        pretty: t.Optional[bool] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Pause an auto-follow pattern.

Pause a cross-cluster replication auto-follow pattern. When the API returns, the auto-follow pattern is inactive. New indices that are created on the remote cluster and match the auto-follow patterns are ignored.

You can resume auto-following with the resume auto-follow pattern API. When it resumes, the auto-follow pattern is active again and automatically configures follower indices for newly created indices on the remote cluster that match its patterns. Remote indices that were created while the pattern was paused will also be followed, unless they have been deleted or closed in the interim.

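Example (illustrative sketch; assumes a configured `Elasticsearch` client named `client` and a hypothetical pattern collection name)::

    resp = client.ccr.pause_auto_follow_pattern(name="my-auto-follow-pattern")
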
``_

:param name: The name of the auto-follow pattern to pause.
:param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout.
        """
        if name in SKIP_IN_PATH:
            raise ValueError("Empty value passed for parameter 'name'")
        __path_parts: t.Dict[str, str] = {"name": _quote(name)}
        __path = f'/_ccr/auto_follow/{__path_parts["name"]}/pause'
        __query: t.Dict[str, t.Any] = {}
        if error_trace is not None:
            __query["error_trace"] = error_trace
        if filter_path is not None:
            __query["filter_path"] = filter_path
        if human is not None:
            __query["human"] = human
        if master_timeout is not None:
            __query["master_timeout"] = master_timeout
        if pretty is not None:
            __query["pretty"] = pretty
        __headers = {"accept": "application/json"}
        return self.perform_request(  # type: ignore[return-value]
            "POST",
            __path,
            params=__query,
            headers=__headers,
            endpoint_id="ccr.pause_auto_follow_pattern",
            path_parts=__path_parts,
        )

    @_rewrite_parameters()
    def pause_follow(
        self,
        *,
        index: str,
        error_trace: t.Optional[bool] = None,
        filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None,
        human: t.Optional[bool] = None,
        master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None,
        pretty: t.Optional[bool] = None,
    ) -> ObjectApiResponse[t.Any]:
        """
        .. raw:: html

Pause a follower.

Pause a cross-cluster replication follower index. The follower index will not fetch any additional operations from the leader index. You can resume following with the resume follower API. You can pause and resume a follower index to change the configuration of the following task.

``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/pause_follow' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.pause_follow", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "remote_cluster", "follow_index_pattern", "leader_index_exclusion_patterns", "leader_index_patterns", "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", "max_read_request_size", "max_retry_delay", "max_write_buffer_count", "max_write_buffer_size", "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", "settings", ), ) def put_auto_follow_pattern( self, *, name: str, remote_cluster: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, follow_index_pattern: t.Optional[str] = None, human: t.Optional[bool] = None, leader_index_exclusion_patterns: t.Optional[t.Sequence[str]] = None, leader_index_patterns: t.Optional[t.Sequence[str]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, max_read_request_size: t.Optional[t.Union[int, str]] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, max_write_buffer_size: t.Optional[t.Union[int, str]] = None, max_write_request_operation_count: t.Optional[int] = None, max_write_request_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update auto-follow patterns. Create a collection of cross-cluster replication auto-follow patterns for a remote cluster. Newly created indices on the remote cluster that match any of the patterns are automatically configured as follower indices. Indices on the remote cluster that were created before the auto-follow pattern was created will not be auto-followed even if they match the pattern.

This API can also be used to update auto-follow patterns. NOTE: Follower indices that were configured automatically before updating an auto-follow pattern will remain unchanged even if they do not match against the new patterns.

``_

:param name: The name of the collection of auto-follow patterns.
:param remote_cluster: The remote cluster containing the leader indices to match against.
:param follow_index_pattern: The name of the follower index. The template {{leader_index}} can be used to derive the name of the follower index from the name of the leader index. When following a data stream, use {{leader_index}}; CCR does not support changes to the names of a follower data stream’s backing indices.
:param leader_index_exclusion_patterns: An array of simple index patterns that can be used to exclude indices from being auto-followed. Indices in the remote cluster whose names match one or more leader_index_patterns and one or more leader_index_exclusion_patterns won’t be followed.
:param leader_index_patterns: An array of simple index patterns to match against indices in the remote cluster specified by the remote_cluster field.
:param master_timeout: Period to wait for a connection to the master node.
:param max_outstanding_read_requests: The maximum number of outstanding read requests from the remote cluster.
:param max_outstanding_write_requests: The maximum number of outstanding write requests on the follower.
:param max_read_request_operation_count: The maximum number of operations to pull per read from the remote cluster.
:param max_read_request_size: The maximum size in bytes per read of a batch of operations pulled from the remote cluster.
:param max_retry_delay: The maximum time to wait before retrying an operation that failed exceptionally. An exponential backoff strategy is employed when retrying.
:param max_write_buffer_count: The maximum number of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the number of queued operations goes below the limit.
:param max_write_buffer_size: The maximum total bytes of operations that can be queued for writing. When this limit is reached, reads from the remote cluster will be deferred until the total bytes of queued operations goes below the limit.
:param max_write_request_operation_count: The maximum number of operations per bulk write request executed on the follower.
:param max_write_request_size: The maximum total bytes of operations per bulk write request executed on the follower.
:param read_poll_timeout: The maximum time to wait for new operations on the remote cluster when the follower index is synchronized with the leader index. When the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics. Then the follower will immediately attempt to read from the leader again.
:param settings: Settings to override from the leader index. Note that certain settings cannot be overridden (e.g., index.number_of_shards).
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if remote_cluster is None and body is None: raise ValueError("Empty value passed for parameter 'remote_cluster'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if remote_cluster is not None: __body["remote_cluster"] = remote_cluster if follow_index_pattern is not None: __body["follow_index_pattern"] = follow_index_pattern if leader_index_exclusion_patterns is not None: __body["leader_index_exclusion_patterns"] = ( leader_index_exclusion_patterns ) if leader_index_patterns is not None: __body["leader_index_patterns"] = leader_index_patterns if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: __body["max_outstanding_write_requests"] = ( max_outstanding_write_requests ) if max_read_request_operation_count is not None: __body["max_read_request_operation_count"] = ( max_read_request_operation_count ) if max_read_request_size is not None: __body["max_read_request_size"] = max_read_request_size if max_retry_delay is not None: __body["max_retry_delay"] = max_retry_delay if max_write_buffer_count is not None: __body["max_write_buffer_count"] = max_write_buffer_count if max_write_buffer_size is not None: __body["max_write_buffer_size"] = max_write_buffer_size if max_write_request_operation_count is not None: __body["max_write_request_operation_count"] = ( max_write_request_operation_count ) if max_write_request_size is not None: __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout if settings is not None: __body["settings"] = settings __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.put_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters() def resume_auto_follow_pattern( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resume an auto-follow pattern.

Resume a cross-cluster replication auto-follow pattern that was paused. The auto-follow pattern will resume configuring following indices for newly created indices that match its patterns on the remote cluster. Remote indices created while the pattern was paused will also be followed unless they have been deleted or closed in the interim.

``_ :param name: The name of the auto-follow pattern to resume. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_ccr/auto_follow/{__path_parts["name"]}/resume' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.resume_auto_follow_pattern", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "max_outstanding_read_requests", "max_outstanding_write_requests", "max_read_request_operation_count", "max_read_request_size", "max_retry_delay", "max_write_buffer_count", "max_write_buffer_size", "max_write_request_operation_count", "max_write_request_size", "read_poll_timeout", ), ) def resume_follow( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_outstanding_read_requests: t.Optional[int] = None, max_outstanding_write_requests: t.Optional[int] = None, max_read_request_operation_count: t.Optional[int] = None, max_read_request_size: t.Optional[str] = None, max_retry_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, max_write_buffer_count: t.Optional[int] = None, max_write_buffer_size: t.Optional[str] = None, max_write_request_operation_count: t.Optional[int] = None, max_write_request_size: t.Optional[str] = None, pretty: t.Optional[bool] = None, read_poll_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resume a follower. Resume a cross-cluster replication follower index that was paused. The follower index could have been paused with the pause follower API. Alternatively it could be paused due to replication that cannot be retried due to failures during following tasks. When this API returns, the follower index will resume fetching operations from the leader index.

``_ :param index: The name of the follow index to resume following. :param master_timeout: Period to wait for a connection to the master node. :param max_outstanding_read_requests: :param max_outstanding_write_requests: :param max_read_request_operation_count: :param max_read_request_size: :param max_retry_delay: :param max_write_buffer_count: :param max_write_buffer_size: :param max_write_request_operation_count: :param max_write_request_size: :param read_poll_timeout: """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/resume_follow' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if max_outstanding_read_requests is not None: __body["max_outstanding_read_requests"] = max_outstanding_read_requests if max_outstanding_write_requests is not None: __body["max_outstanding_write_requests"] = ( max_outstanding_write_requests ) if max_read_request_operation_count is not None: __body["max_read_request_operation_count"] = ( max_read_request_operation_count ) if max_read_request_size is not None: __body["max_read_request_size"] = max_read_request_size if max_retry_delay is not None: __body["max_retry_delay"] = max_retry_delay if max_write_buffer_count is not None: __body["max_write_buffer_count"] = max_write_buffer_count if max_write_buffer_size is not None: __body["max_write_buffer_size"] = max_write_buffer_size if max_write_request_operation_count is not None: __body["max_write_request_operation_count"] = ( max_write_request_operation_count ) if max_write_request_size is not None: __body["max_write_request_size"] = max_write_request_size if read_poll_timeout is not None: __body["read_poll_timeout"] = read_poll_timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ccr.resume_follow", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cross-cluster replication stats.

This API returns stats about auto-following and the same shard-level stats as the get follower stats API.

``_ :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ccr/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ccr.stats", path_parts=__path_parts, ) @_rewrite_parameters() def unfollow( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Unfollow an index.

Convert a cross-cluster replication follower index to a regular index. The API stops the following task associated with a follower index and removes index metadata and settings associated with cross-cluster replication. The follower index must be paused and closed before you call the unfollow API.

info Currently cross-cluster replication does not support converting an existing regular index to a follower index. Converting a follower index to a regular index is an irreversible operation.

``_ :param index: The name of the follower index. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ccr/unfollow' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ccr.unfollow", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/cluster.py000066400000000000000000001713401506101734100251150ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import HeadApiResponse, ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class ClusterClient(NamespacedClient): @_rewrite_parameters( body_fields=("current_node", "index", "primary", "shard"), ) def allocation_explain( self, *, current_node: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_disk_info: t.Optional[bool] = None, include_yes_decisions: t.Optional[bool] = None, index: t.Optional[str] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, primary: t.Optional[bool] = None, shard: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain the shard allocations. Get explanations for shard allocations in the cluster. For unassigned shards, it provides an explanation for why the shard is unassigned. For assigned shards, it provides an explanation for why the shard is remaining on its current node and has not moved or rebalanced to another node. This API can be very useful when attempting to diagnose why a shard is unassigned or why a shard continues to remain on its current node when you might expect otherwise. Refer to the linked documentation for examples of how to troubleshoot allocation issues using this API.

``_ :param current_node: Specifies the node ID or the name of the node to only explain a shard that is currently located on the specified node. :param include_disk_info: If true, returns information about disk usage and shard sizes. :param include_yes_decisions: If true, returns YES decisions in explanation. :param index: Specifies the name of the index that you would like an explanation for. :param master_timeout: Period to wait for a connection to the master node. :param primary: If true, returns explanation for the primary shard for the given shard ID. :param shard: Specifies the ID of the shard that you would like an explanation for. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/allocation/explain" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_disk_info is not None: __query["include_disk_info"] = include_disk_info if include_yes_decisions is not None: __query["include_yes_decisions"] = include_yes_decisions if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if current_node is not None: __body["current_node"] = current_node if index is not None: __body["index"] = index if primary is not None: __body["primary"] = primary if shard is not None: __body["shard"] = shard if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.allocation_explain", path_parts=__path_parts, ) @_rewrite_parameters() def delete_component_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete component templates. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

``_ :param name: Comma-separated list or wildcard expression of component template names used to limit the request. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="cluster.delete_component_template", path_parts=__path_parts, ) @_rewrite_parameters() def delete_voting_config_exclusions( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_removal: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear cluster voting config exclusions. Remove master-eligible nodes from the voting configuration exclusion list.

``_ :param master_timeout: Period to wait for a connection to the master node. :param wait_for_removal: Specifies whether to wait for all excluded nodes to be removed from the cluster before clearing the voting configuration exclusions list. Defaults to true, meaning that all excluded nodes must be removed from the cluster before this API takes any action. If set to false then the voting configuration exclusions list is cleared even if some excluded nodes are still in the cluster. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/voting_config_exclusions" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_removal is not None: __query["wait_for_removal"] = wait_for_removal __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="cluster.delete_voting_config_exclusions", path_parts=__path_parts, ) @_rewrite_parameters() def exists_component_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check component templates. Returns information about whether a particular component template exists.

``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (*) expressions are supported. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="cluster.exists_component_template", path_parts=__path_parts, ) @_rewrite_parameters() def get_component_template( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings_filter: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get component templates. Get information about component templates.

``_ :param name: Comma-separated list of component template names used to limit the request. Wildcard (`*`) expressions are supported. :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: Return all default configurations for the component template (default: false) :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings_filter: Filter out results, for example to filter out sensitive information. Supports wildcards or full settings keys """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' else: __path_parts = {} __path = "/_component_template" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if settings_filter is not None: __query["settings_filter"] = settings_filter __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.get_component_template", path_parts=__path_parts, ) @_rewrite_parameters() def get_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster-wide settings.

By default, it returns only settings that have been explicitly defined.

``_ :param flat_settings: If `true`, returns settings in flat format. :param include_defaults: If `true`, also returns default values for all other cluster settings, reflecting the values in the `elasticsearch.yml` file of one of the nodes in the cluster. If the nodes in your cluster do not all have the same values in their `elasticsearch.yml` config files then the values returned by this API may vary from invocation to invocation and may not reflect the values that Elasticsearch uses in all situations. Use the `GET _nodes/settings` API to fetch the settings for each individual node in your cluster. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.get_settings", path_parts=__path_parts, ) @_rewrite_parameters() def health( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, wait_for_events: t.Optional[ t.Union[ str, t.Literal["high", "immediate", "languid", "low", "normal", "urgent"], ] ] = None, wait_for_no_initializing_shards: t.Optional[bool] = None, wait_for_no_relocating_shards: t.Optional[bool] = None, wait_for_nodes: t.Optional[t.Union[int, str]] = None, wait_for_status: t.Optional[ t.Union[str, t.Literal["green", "red", "unavailable", "unknown", "yellow"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the cluster health status.

You can also use the API to get the health status of only specified data streams and indices. For data streams, the API retrieves the health status of the stream’s backing indices.

The cluster health status is: green, yellow or red. On the shard level, a red status indicates that the specific shard is not allocated in the cluster. Yellow means that the primary shard is allocated but replicas are not. Green means that all shards are allocated. The index level status is controlled by the worst shard status.

One of the main benefits of the API is the ability to wait until the cluster reaches a certain high watermark health level. The cluster status is controlled by the worst index status.

``_

:param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams and indices in a cluster, omit this parameter or use _all or `*`.
:param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both.
:param level: Can be one of cluster, indices or shards. Controls the details level of the health information returned.
:param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node.
:param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
:param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
:param wait_for_active_shards: A number controlling to how many active shards to wait for, all to wait for all shards in the cluster to be active, or 0 to not wait.
:param wait_for_events: Can be one of immediate, urgent, high, normal, low, languid. Wait until all currently queued events with the given priority are processed.
:param wait_for_no_initializing_shards: A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard initializations. Defaults to false, which means it will not wait for initializing shards.
:param wait_for_no_relocating_shards: A boolean value which controls whether to wait (until the timeout provided) for the cluster to have no shard relocations. Defaults to false, which means it will not wait for relocating shards.
:param wait_for_nodes: The request waits until the specified number N of nodes is available. It also accepts >=N, <=N, >N and <N.
:param wait_for_status: One of green, yellow or red. Will wait (until the timeout provided) until the status of the cluster changes to the one provided or better, i.e. green > yellow > red. By default, will not wait for any status.
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cluster/health/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cluster/health" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if level is not None: __query["level"] = level if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if wait_for_events is not None: __query["wait_for_events"] = wait_for_events if wait_for_no_initializing_shards is not None: __query["wait_for_no_initializing_shards"] = wait_for_no_initializing_shards if wait_for_no_relocating_shards is not None: __query["wait_for_no_relocating_shards"] = wait_for_no_relocating_shards if wait_for_nodes is not None: __query["wait_for_nodes"] = wait_for_nodes if wait_for_status is not None: __query["wait_for_status"] = wait_for_status __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.health", path_parts=__path_parts, ) @_rewrite_parameters() def info( self, *, target: t.Union[ t.Sequence[ t.Union[ str, t.Literal["_all", "http", "ingest", "script", "thread_pool"] ] ], t.Union[str, t.Literal["_all", "http", "ingest", "script", "thread_pool"]], ], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster info. Returns basic information about the cluster.

``_ :param target: Limits the information returned to the specific target. Supports a comma-separated list, such as http,ingest. """ if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = {"target": _quote(target)} __path = f'/_info/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.info", path_parts=__path_parts, ) @_rewrite_parameters() def pending_tasks( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the pending cluster tasks. Get information about cluster-level changes (such as create index, update mapping, allocate or fail shard) that have not yet taken effect.

NOTE: This API returns a list of any pending updates to the cluster state. These are distinct from the tasks reported by the task management API which include periodic tasks and tasks initiated by the user, such as node stats, search queries, or create index requests. However, if a user-initiated task such as a create index command causes a cluster state update, the activity of this task might be reported by both task api and pending cluster tasks API.

``_ :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/pending_tasks" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.pending_tasks", path_parts=__path_parts, ) @_rewrite_parameters() def post_voting_config_exclusions( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, node_ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, node_names: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update voting configuration exclusions. Update the cluster voting config exclusions by node IDs or node names. By default, if there are more than three master-eligible nodes in the cluster and you remove fewer than half of the master-eligible nodes in the cluster at once, the voting configuration automatically shrinks. If you want to shrink the voting configuration to contain fewer than three nodes or to remove half or more of the master-eligible nodes in the cluster at once, use this API to remove departing nodes from the voting configuration manually. The API adds an entry for each specified node to the cluster’s voting configuration exclusions list. It then waits until the cluster has reconfigured its voting configuration to exclude the specified nodes.

Clusters should have no voting configuration exclusions in normal operation. Once the excluded nodes have stopped, clear the voting configuration exclusions with DELETE /_cluster/voting_config_exclusions. This API waits for the nodes to be fully removed from the cluster before it returns. If your cluster has voting configuration exclusions for nodes that you no longer intend to remove, use DELETE /_cluster/voting_config_exclusions?wait_for_removal=false to clear the voting configuration exclusions without waiting for the nodes to leave the cluster.

A response to POST /_cluster/voting_config_exclusions with an HTTP status code of 200 OK guarantees that the node has been removed from the voting configuration and will not be reinstated until the voting configuration exclusions are cleared by calling DELETE /_cluster/voting_config_exclusions. If the call to POST /_cluster/voting_config_exclusions fails or returns a response with an HTTP status code other than 200 OK then the node may not have been removed from the voting configuration. In that case, you may safely retry the call.

NOTE: Voting exclusions are required only when you remove at least half of the master-eligible nodes from a cluster in a short time period. They are not required when removing master-ineligible nodes or when removing fewer than half of the master-eligible nodes.

``_ :param master_timeout: Period to wait for a connection to the master node. :param node_ids: A comma-separated list of the persistent ids of the nodes to exclude from the voting configuration. If specified, you may not also specify node_names. :param node_names: A comma-separated list of the names of the nodes to exclude from the voting configuration. If specified, you may not also specify node_ids. :param timeout: When adding a voting configuration exclusion, the API waits for the specified nodes to be excluded from the voting configuration before returning. If the timeout expires before the appropriate condition is satisfied, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/voting_config_exclusions" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if node_ids is not None: __query["node_ids"] = node_ids if node_names is not None: __query["node_names"] = node_names if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="cluster.post_voting_config_exclusions", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("template", "deprecated", "meta", "version"), parameter_aliases={"_meta": "meta"}, ) def put_component_template( self, *, name: str, template: t.Optional[t.Mapping[str, t.Any]] = None, cause: t.Optional[str] = None, create: t.Optional[bool] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a component template. Component templates are building blocks for constructing index templates that specify index mappings, settings, and aliases.

An index template can be composed of multiple component templates. To use a component template, specify it in an index template’s composed_of list. Component templates are only applied to new data streams and indices as part of a matching index template.

Settings and mappings specified directly in the index template or the create index request override any settings or mappings specified in a component template.

Component templates are only used during index creation. For data streams, this includes data stream creation and the creation of a stream’s backing indices. Changes to component templates do not affect existing indices, including a stream’s backing indices.

You can use C-style /* *\\/ block comments in component templates. You can include comments anywhere in the request body except before the opening curly bracket.

Applying component templates

You cannot directly apply a component template to a data stream or index. To be applied, a component template must be included in an index template's composed_of list.

``_

:param name: Name of the component template to create. Elasticsearch includes the following built-in component templates: `logs-mappings`; `logs-settings`; `metrics-mappings`; `metrics-settings`; `synthetics-mappings`; `synthetics-settings`. Elastic Agent uses these templates to configure backing indices for its data streams. If you use Elastic Agent and want to overwrite one of these templates, set the `version` for your replacement template higher than the current version. If you don’t use Elastic Agent and want to disable all built-in component and index templates, set `stack.templates.enabled` to `false` using the cluster update settings API.
:param template: The template to be applied which includes mappings, settings, or aliases configuration.
:param cause: User-defined reason for creating the component template.
:param create: If `true`, this request cannot replace or update existing component templates.
:param deprecated: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning.
:param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error.
:param meta: Optional user metadata about the component template. It may have any contents. This map is not automatically generated by Elasticsearch. This information is stored in the cluster state, so keeping it short is preferable. To unset `_meta`, replace the template without specifying this information.
:param version: Version number used to manage component templates externally. This number isn't automatically generated or incremented by Elasticsearch. To unset a version, replace the template without specifying a version.
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if template is None and body is None: raise ValueError("Empty value passed for parameter 'template'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_component_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if template is not None: __body["template"] = template if deprecated is not None: __body["deprecated"] = deprecated if meta is not None: __body["_meta"] = meta if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.put_component_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("persistent", "transient"), ) def put_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, persistent: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, transient: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the cluster settings.

Configure and update dynamic settings on a running cluster. You can also configure dynamic settings locally on an unstarted or shut down node in elasticsearch.yml.

Updates made with this API can be persistent, which apply across cluster restarts, or transient, which reset after a cluster restart. You can also reset transient or persistent settings by assigning them a null value.

If you configure the same setting using multiple methods, Elasticsearch applies the settings in following order of precedence: 1) Transient setting; 2) Persistent setting; 3) elasticsearch.yml setting; 4) Default setting value. For example, you can apply a transient setting to override a persistent setting or elasticsearch.yml setting. However, a change to an elasticsearch.yml setting will not override a defined transient or persistent setting.

TIP: In Elastic Cloud, use the user settings feature to configure all cluster settings. This method automatically rejects unsafe settings that could break your cluster. If you run Elasticsearch on your own hardware, use this API to configure dynamic cluster settings. Only use elasticsearch.yml for static cluster settings and node settings. The API doesn’t require a restart and ensures a setting’s value is the same on all nodes.

WARNING: Transient cluster settings are no longer recommended. Use persistent cluster settings instead. If a cluster becomes unstable, transient settings can clear unexpectedly, resulting in a potentially undesired cluster configuration.

``_ :param flat_settings: Return settings in flat format (default: false) :param master_timeout: Explicit operation timeout for connection to master node :param persistent: The settings that persist after the cluster restarts. :param timeout: Explicit operation timeout :param transient: The settings that do not persist after the cluster restarts. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if persistent is not None: __body["persistent"] = persistent if transient is not None: __body["transient"] = transient __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.put_settings", path_parts=__path_parts, ) @_rewrite_parameters() def remote_info( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get remote cluster information.

Get information about configured remote clusters. The API returns connection and endpoint information keyed by the configured remote cluster alias.

info This API returns information that reflects current state on the local cluster. The connected field does not necessarily reflect whether a remote cluster is down or unavailable, only whether there is currently an open connection to it. Elasticsearch does not spontaneously try to reconnect to a disconnected remote cluster. To trigger a reconnection, attempt a cross-cluster search, ES|QL cross-cluster search, or try the resolve cluster endpoint.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_remote/info" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.remote_info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("commands",), ) def reroute( self, *, commands: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, retry_failed: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reroute the cluster. Manually change the allocation of individual shards in the cluster. For example, a shard can be moved from one node to another explicitly, an allocation can be canceled, and an unassigned shard can be explicitly allocated to a specific node.

It is important to note that after processing any reroute commands Elasticsearch will perform rebalancing as normal (respecting the values of settings such as cluster.routing.rebalance.enable) in order to remain in a balanced state. For example, if the requested allocation includes moving a shard from node1 to node2 then this may cause a shard to be moved from node2 back to node1 to even things out.

The cluster can be set to disable allocations using the cluster.routing.allocation.enable setting. If allocations are disabled then the only allocations that will be performed are explicit ones given using the reroute command, and consequent allocations due to rebalancing.

The cluster will attempt to allocate a shard a maximum of index.allocation.max_retries times in a row (defaults to 5) before giving up and leaving the shard unallocated. This scenario can be caused by structural problems such as an analyzer that refers to a stopwords file that does not exist on all nodes.

Once the problem has been corrected, allocation can be manually retried by calling the reroute API with the ?retry_failed URI query parameter, which will attempt a single retry round for these shards.
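A hedged sketch of a dry-run shard move, assuming an existing client; the index and node names are placeholders:

    resp = client.cluster.reroute(
        commands=[
            {
                "move": {
                    "index": "my-index",
                    "shard": 0,
                    "from_node": "node-1",
                    "to_node": "node-2",
                }
            }
        ],
        dry_run=True,
    )

To retry shards blocked by repeated allocation failures, call client.cluster.reroute(retry_failed=True) with no commands.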

``_ :param commands: Defines the commands to perform. :param dry_run: If true, then the request simulates the operation. It will calculate the result of applying the commands to the current cluster state and return the resulting cluster state after the commands (and rebalancing) have been applied; it will not actually perform the requested changes. :param explain: If true, then the response contains an explanation of why the commands can or cannot run. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param metric: Limits the information returned to the specified metrics. :param retry_failed: If true, then retries allocation of shards that are blocked due to too many subsequent allocation failures. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_cluster/reroute" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if metric is not None: __query["metric"] = metric if pretty is not None: __query["pretty"] = pretty if retry_failed is not None: __query["retry_failed"] = retry_failed if timeout is not None: __query["timeout"] = timeout if not __body: if commands is not None: __body["commands"] = commands if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="cluster.reroute", path_parts=__path_parts, ) @_rewrite_parameters() def state( self, *, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_metadata_version: t.Optional[int] = None, wait_for_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the cluster state. Get comprehensive information about the state of the cluster.

The cluster state is an internal data structure which keeps track of a variety of information needed by every node, including the identity and attributes of the other nodes in the cluster; cluster-wide settings; index metadata, including the mapping and settings for each index; and the location and status of every shard copy in the cluster.

The elected master node ensures that every node in the cluster has a copy of the same cluster state. This API lets you retrieve a representation of this internal state for debugging or diagnostic purposes. You may need to consult the Elasticsearch source code to determine the precise meaning of the response.

By default the API will route requests to the elected master node since this node is the authoritative source of cluster states. You can also retrieve the cluster state held on the node handling the API request by adding the ?local=true query parameter.

Elasticsearch may need to expend significant effort to compute a response to this API in larger clusters, and the response may comprise a very large quantity of data. If you use this API repeatedly, your cluster may become unstable.

WARNING: The response is a representation of an internal data structure. Its format is not subject to the same compatibility guarantees as other more stable APIs and may change from version to version. Do not query this API using external monitoring tools. Instead, obtain the information you require using other more stable cluster APIs.
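For one-off debugging, a minimal sketch that narrows the response to the metadata of a single index (the index name is a placeholder):

    resp = client.cluster.state(metric="metadata", index="my-index")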

``_ :param metric: Limit the information returned to the specified metrics :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param flat_settings: Return settings in flat format (default: false) :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param local: Return local information, do not retrieve the state from master node (default: false) :param master_timeout: Specify timeout for connection to master :param wait_for_metadata_version: Wait for the metadata version to be equal or greater than the specified metadata version :param wait_for_timeout: The maximum time to wait for wait_for_metadata_version before timing out """ __path_parts: t.Dict[str, str] if metric not in SKIP_IN_PATH and index not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric), "index": _quote(index)} __path = f'/_cluster/state/{__path_parts["metric"]}/{__path_parts["index"]}' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_cluster/state/{__path_parts["metric"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_cluster/state/_all/{__path_parts["index"]}' else: __path_parts = {} __path = "/_cluster/state" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_metadata_version is not None: __query["wait_for_metadata_version"] = wait_for_metadata_version if wait_for_timeout is not None: __query["wait_for_timeout"] = wait_for_timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.state", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_remotes: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster statistics. Get basic index metrics (shard numbers, store size, memory usage) and information about the current nodes that form the cluster (number, roles, OS, JVM versions, memory usage, CPU, and installed plugins).
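A minimal call sketch, assuming an existing client; include_remotes is optional:

    resp = client.cluster.stats(include_remotes=True)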

``_ :param node_id: Comma-separated list of node filters used to limit returned information. Defaults to all nodes in the cluster. :param include_remotes: Include remote cluster data into the response :param timeout: Period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its stats. However, timed out nodes are included in the response’s `_nodes.failed` property. Defaults to no timeout. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_cluster/stats/nodes/{__path_parts["node_id"]}' else: __path_parts = {} __path = "/_cluster/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_remotes is not None: __query["include_remotes"] = include_remotes if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="cluster.stats", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/connector.py000066400000000000000000002315611506101734100254300ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class ConnectorClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def check_in( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check in a connector.

Update the last_seen field in the connector and set it to the current timestamp.
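A minimal sketch (the connector ID is a placeholder):

    resp = client.connector.check_in(connector_id="my-connector")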

``_ :param connector_id: The unique identifier of the connector to be checked in """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_check_in' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.check_in", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) def delete( self, *, connector_id: str, delete_sync_jobs: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, hard: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a connector.

Removes a connector and associated sync jobs. This is a destructive action that is not recoverable. NOTE: This action doesn’t delete any API keys, ingest pipelines, or data indices associated with the connector. These need to be removed manually.
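A hedged sketch that also removes the connector's sync jobs (the connector ID is a placeholder):

    resp = client.connector.delete(
        connector_id="my-connector",
        delete_sync_jobs=True,
    )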

``_ :param connector_id: The unique identifier of the connector to be deleted :param delete_sync_jobs: A flag indicating if associated sync jobs should be also removed. Defaults to false. :param hard: A flag indicating if the connector should be hard deleted. """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}' __query: t.Dict[str, t.Any] = {} if delete_sync_jobs is not None: __query["delete_sync_jobs"] = delete_sync_jobs if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if hard is not None: __query["hard"] = hard if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="connector.delete", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) def get( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_deleted: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a connector.

Get the details about a connector.

``_ :param connector_id: The unique identifier of the connector :param include_deleted: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_deleted is not None: __query["include_deleted"] = include_deleted if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.get", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "last_access_control_sync_error", "last_access_control_sync_scheduled_at", "last_access_control_sync_status", "last_deleted_document_count", "last_incremental_sync_scheduled_at", "last_indexed_document_count", "last_seen", "last_sync_error", "last_sync_scheduled_at", "last_sync_status", "last_synced", "sync_cursor", ), ) @_stability_warning(Stability.EXPERIMENTAL) def last_sync( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, last_access_control_sync_error: t.Optional[str] = None, last_access_control_sync_scheduled_at: t.Optional[t.Union[str, t.Any]] = None, last_access_control_sync_status: t.Optional[ t.Union[ str, t.Literal[ "canceled", "canceling", "completed", "error", "in_progress", "pending", "suspended", ], ] ] = None, last_deleted_document_count: t.Optional[int] = None, last_incremental_sync_scheduled_at: t.Optional[t.Union[str, t.Any]] = None, last_indexed_document_count: t.Optional[int] = None, last_seen: t.Optional[t.Union[str, t.Any]] = None, last_sync_error: t.Optional[str] = None, last_sync_scheduled_at: t.Optional[t.Union[str, t.Any]] = None, last_sync_status: t.Optional[ t.Union[ str, t.Literal[ "canceled", "canceling", "completed", "error", "in_progress", "pending", "suspended", ], ] ] = None, last_synced: t.Optional[t.Union[str, t.Any]] = None, pretty: t.Optional[bool] = None, sync_cursor: t.Optional[t.Any] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector last sync stats.

Update the fields related to the last sync of a connector. This action is used for analytics and monitoring.
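An illustrative sketch recording a successful sync; the ID and counts are placeholders:

    resp = client.connector.last_sync(
        connector_id="my-connector",
        last_sync_status="completed",
        last_indexed_document_count=1250,
        last_deleted_document_count=0,
    )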

``_ :param connector_id: The unique identifier of the connector to be updated :param last_access_control_sync_error: :param last_access_control_sync_scheduled_at: :param last_access_control_sync_status: :param last_deleted_document_count: :param last_incremental_sync_scheduled_at: :param last_indexed_document_count: :param last_seen: :param last_sync_error: :param last_sync_scheduled_at: :param last_sync_status: :param last_synced: :param sync_cursor: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_last_sync' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if last_access_control_sync_error is not None: __body["last_access_control_sync_error"] = ( last_access_control_sync_error ) if last_access_control_sync_scheduled_at is not None: __body["last_access_control_sync_scheduled_at"] = ( last_access_control_sync_scheduled_at ) if last_access_control_sync_status is not None: __body["last_access_control_sync_status"] = ( last_access_control_sync_status ) if last_deleted_document_count is not None: __body["last_deleted_document_count"] = last_deleted_document_count if last_incremental_sync_scheduled_at is not None: __body["last_incremental_sync_scheduled_at"] = ( last_incremental_sync_scheduled_at ) if last_indexed_document_count is not None: __body["last_indexed_document_count"] = last_indexed_document_count if last_seen is not None: __body["last_seen"] = last_seen if last_sync_error is not None: __body["last_sync_error"] = last_sync_error if last_sync_scheduled_at is not None: __body["last_sync_scheduled_at"] = last_sync_scheduled_at if last_sync_status is not None: __body["last_sync_status"] = last_sync_status if last_synced is not None: __body["last_synced"] = last_synced if sync_cursor is not None: __body["sync_cursor"] = sync_cursor __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.last_sync", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) @_stability_warning(Stability.BETA) def list( self, *, connector_name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, include_deleted: t.Optional[bool] = None, index_name: t.Optional[t.Union[str, t.Sequence[str]]] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, service_type: t.Optional[t.Union[str, t.Sequence[str]]] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all connectors.

Get information about all connectors.
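A paging sketch using the from_/size parameters; the filter value is a placeholder, and the results key follows the documented list response shape:

    resp = client.connector.list(index_name="search-content", from_=0, size=10)
    for connector in resp["results"]:
        print(connector["id"], connector["name"])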

``_ :param connector_name: A comma-separated list of connector names to fetch connector documents for :param from_: Starting offset (default: 0) :param include_deleted: A flag to indicate if the desired connector should be fetched, even if it was soft-deleted. :param index_name: A comma-separated list of connector index names to fetch connector documents for :param query: A wildcard query string that filters connectors with matching name, description or index name :param service_type: A comma-separated list of connector service types to fetch connector documents for :param size: Specifies a max number of results to get """ __path_parts: t.Dict[str, str] = {} __path = "/_connector" __query: t.Dict[str, t.Any] = {} if connector_name is not None: __query["connector_name"] = connector_name if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if include_deleted is not None: __query["include_deleted"] = include_deleted if index_name is not None: __query["index_name"] = index_name if pretty is not None: __query["pretty"] = pretty if query is not None: __query["query"] = query if service_type is not None: __query["service_type"] = service_type if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.list", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "index_name", "is_native", "language", "name", "service_type", ), ) @_stability_warning(Stability.BETA) def post( self, *, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_name: t.Optional[str] = None, is_native: t.Optional[bool] = None, language: t.Optional[str] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, service_type: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a connector.

Connectors are Elasticsearch integrations that bring content from third-party data sources; they can be deployed on Elastic Cloud or hosted on your own infrastructure. Elastic managed connectors (native connectors) are a managed service on Elastic Cloud. Self-managed connectors (connector clients) are self-managed on your infrastructure.
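A creation sketch; the index name, display name, and service type shown are illustrative:

    resp = client.connector.post(
        index_name="search-my-data",
        name="My data connector",
        service_type="google_drive",
    )
    connector_id = resp["id"]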

``_ :param description: :param index_name: :param is_native: :param language: :param name: :param service_type: """ __path_parts: t.Dict[str, str] = {} __path = "/_connector" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if index_name is not None: __body["index_name"] = index_name if is_native is not None: __body["is_native"] = is_native if language is not None: __body["language"] = language if name is not None: __body["name"] = name if service_type is not None: __body["service_type"] = service_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.post", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "index_name", "is_native", "language", "name", "service_type", ), ) @_stability_warning(Stability.BETA) def put( self, *, connector_id: t.Optional[str] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_name: t.Optional[str] = None, is_native: t.Optional[bool] = None, language: t.Optional[str] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, service_type: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a connector.
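A sketch that upserts a connector under an explicit ID (all values are placeholders):

    resp = client.connector.put(
        connector_id="my-connector",
        index_name="search-my-data",
        name="My data connector",
        service_type="sharepoint_online",
    )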

``_ :param connector_id: The unique identifier of the connector to be created or updated. ID is auto-generated if not provided. :param description: :param index_name: :param is_native: :param language: :param name: :param service_type: """ __path_parts: t.Dict[str, str] if connector_id not in SKIP_IN_PATH: __path_parts = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}' else: __path_parts = {} __path = "/_connector" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if index_name is not None: __body["index_name"] = index_name if is_native is not None: __body["is_native"] = is_native if language is not None: __body["language"] = language if name is not None: __body["name"] = name if service_type is not None: __body["service_type"] = service_type if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.put", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_cancel( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel a connector sync job.

Cancel a connector sync job, which sets the status to cancelling and updates cancellation_requested_at to the current time. The connector service is then responsible for setting the status of connector sync jobs to cancelled.
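A minimal sketch (the job ID is a placeholder):

    resp = client.connector.sync_job_cancel(connector_sync_job_id="my-sync-job-id")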

``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = ( f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_cancel' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_cancel", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def sync_job_check_in( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check in a connector sync job. Check in a connector sync job and set the last_seen field to the current time before updating it in the internal index.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.

``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be checked in. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = ( f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_check_in' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_check_in", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("worker_hostname", "sync_cursor"), ) @_stability_warning(Stability.EXPERIMENTAL) def sync_job_claim( self, *, connector_sync_job_id: str, worker_hostname: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, sync_cursor: t.Optional[t.Any] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Claim a connector sync job. This action updates the job status to in_progress and sets the last_seen and started_at timestamps to the current time. Additionally, it can set the sync_cursor property for the sync job.

This API is not intended for direct connector management by users. It supports the implementation of services that utilize the connector protocol to communicate with Elasticsearch.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
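A claim sketch as a connector service might issue it; the job ID and hostname are placeholders:

    resp = client.connector.sync_job_claim(
        connector_sync_job_id="my-sync-job-id",
        worker_hostname="connector-worker-1",
    )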

``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param worker_hostname: The host name of the current system that will run the job. :param sync_cursor: The cursor object from the last incremental sync job. This should reference the `sync_cursor` field in the connector state for which the job runs. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") if worker_hostname is None and body is None: raise ValueError("Empty value passed for parameter 'worker_hostname'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_claim' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if worker_hostname is not None: __body["worker_hostname"] = worker_hostname if sync_cursor is not None: __body["sync_cursor"] = sync_cursor __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_claim", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_delete( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a connector sync job.

Remove a connector sync job and its associated data. This is a destructive action that is not recoverable.

``_ :param connector_sync_job_id: The unique identifier of the connector sync job to be deleted """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_delete", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("error",), ) @_stability_warning(Stability.EXPERIMENTAL) def sync_job_error( self, *, connector_sync_job_id: str, error: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set a connector sync job error. Set the error field for a connector sync job and set its status to error.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
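A minimal sketch; the job ID and error message are placeholders:

    resp = client.connector.sync_job_error(
        connector_sync_job_id="my-sync-job-id",
        error="Data source unreachable",
    )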

``_ :param connector_sync_job_id: The unique identifier for the connector sync job. :param error: The error for the connector sync job error field. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") if error is None and body is None: raise ValueError("Empty value passed for parameter 'error'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_error' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if error is not None: __body["error"] = error __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_error", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) def sync_job_get( self, *, connector_sync_job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a connector sync job.

``_ :param connector_sync_job_id: The unique identifier of the connector sync job """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_get", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) @_stability_warning(Stability.BETA) def sync_job_list( self, *, connector_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, job_type: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["access_control", "full", "incremental"]] ], t.Union[str, t.Literal["access_control", "full", "incremental"]], ] ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, status: t.Optional[ t.Union[ str, t.Literal[ "canceled", "canceling", "completed", "error", "in_progress", "pending", "suspended", ], ] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all connector sync jobs.

Get information about all stored connector sync jobs listed by their creation date in ascending order.
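A filtering sketch (the connector ID is a placeholder):

    resp = client.connector.sync_job_list(
        connector_id="my-connector",
        status="completed",
        size=5,
    )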

``_ :param connector_id: A connector id to fetch connector sync jobs for :param from_: Starting offset (default: 0) :param job_type: A comma-separated list of job types to fetch the sync jobs for :param size: Specifies a max number of results to get :param status: A sync job status to fetch connector sync jobs for """ __path_parts: t.Dict[str, str] = {} __path = "/_connector/_sync_job" __query: t.Dict[str, t.Any] = {} if connector_id is not None: __query["connector_id"] = connector_id if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if job_type is not None: __query["job_type"] = job_type if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if status is not None: __query["status"] = status __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="connector.sync_job_list", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("id", "job_type", "trigger_method"), ) @_stability_warning(Stability.BETA) def sync_job_post( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_type: t.Optional[ t.Union[str, t.Literal["access_control", "full", "incremental"]] ] = None, pretty: t.Optional[bool] = None, trigger_method: t.Optional[ t.Union[str, t.Literal["on_demand", "scheduled"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a connector sync job.

Create a connector sync job document in the internal index and initialize its counters and timestamps with default values.
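A sketch that starts an on-demand full sync (the connector ID is a placeholder):

    resp = client.connector.sync_job_post(
        id="my-connector",
        job_type="full",
        trigger_method="on_demand",
    )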

``_ :param id: The id of the associated connector :param job_type: :param trigger_method: """ if id is None and body is None: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {} __path = "/_connector/_sync_job" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if id is not None: __body["id"] = id if job_type is not None: __body["job_type"] = job_type if trigger_method is not None: __body["trigger_method"] = trigger_method __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_post", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "deleted_document_count", "indexed_document_count", "indexed_document_volume", "last_seen", "metadata", "total_document_count", ), ) @_stability_warning(Stability.EXPERIMENTAL) def sync_job_update_stats( self, *, connector_sync_job_id: str, deleted_document_count: t.Optional[int] = None, indexed_document_count: t.Optional[int] = None, indexed_document_volume: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, last_seen: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, total_document_count: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set the connector sync job stats. Stats include: deleted_document_count, indexed_document_count, indexed_document_volume, and total_document_count. You can also update last_seen. This API is mainly used by the connector service for updating sync job information.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
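An illustrative stats update; the three document counts are required by this client method, and the values are placeholders:

    resp = client.connector.sync_job_update_stats(
        connector_sync_job_id="my-sync-job-id",
        deleted_document_count=0,
        indexed_document_count=1250,
        indexed_document_volume=64,
    )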

``_ :param connector_sync_job_id: The unique identifier of the connector sync job. :param deleted_document_count: The number of documents the sync job deleted. :param indexed_document_count: The number of documents the sync job indexed. :param indexed_document_volume: The total size of the data (in MiB) the sync job indexed. :param last_seen: The timestamp to use in the `last_seen` property for the connector sync job. :param metadata: The connector-specific metadata. :param total_document_count: The total number of documents in the target index after the sync job finished. """ if connector_sync_job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_sync_job_id'") if deleted_document_count is None and body is None: raise ValueError( "Empty value passed for parameter 'deleted_document_count'" ) if indexed_document_count is None and body is None: raise ValueError( "Empty value passed for parameter 'indexed_document_count'" ) if indexed_document_volume is None and body is None: raise ValueError( "Empty value passed for parameter 'indexed_document_volume'" ) __path_parts: t.Dict[str, str] = { "connector_sync_job_id": _quote(connector_sync_job_id) } __path = f'/_connector/_sync_job/{__path_parts["connector_sync_job_id"]}/_stats' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if deleted_document_count is not None: __body["deleted_document_count"] = deleted_document_count if indexed_document_count is not None: __body["indexed_document_count"] = indexed_document_count if indexed_document_volume is not None: __body["indexed_document_volume"] = indexed_document_volume if last_seen is not None: __body["last_seen"] = last_seen if metadata is not None: __body["metadata"] = metadata if total_document_count is not None: __body["total_document_count"] = total_document_count __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.sync_job_update_stats", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def update_active_filtering( self, *, connector_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Activate the connector draft filter.

Activates the valid draft filtering for a connector.

``_ :param connector_id: The unique identifier of the connector to be updated """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_filtering/_activate' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="connector.update_active_filtering", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("api_key_id", "api_key_secret_id"), ) @_stability_warning(Stability.BETA) def update_api_key_id( self, *, connector_id: str, api_key_id: t.Optional[str] = None, api_key_secret_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector API key ID.

Update the api_key_id and api_key_secret_id fields of a connector. You can specify the ID of the API key used for authorization and the ID of the connector secret where the API key is stored. The connector secret ID is required only for Elastic managed (native) connectors. Self-managed connectors (connector clients) do not use this field.
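A minimal sketch; both IDs are placeholders, and api_key_secret_id applies to Elastic managed connectors only:

    resp = client.connector.update_api_key_id(
        connector_id="my-connector",
        api_key_id="my-api-key-id",
    )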

``_ :param connector_id: The unique identifier of the connector to be updated :param api_key_id: :param api_key_secret_id: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_api_key_id' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if api_key_id is not None: __body["api_key_id"] = api_key_id if api_key_secret_id is not None: __body["api_key_secret_id"] = api_key_secret_id __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_api_key_id", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("configuration", "values"), ) @_stability_warning(Stability.BETA) def update_configuration( self, *, connector_id: str, configuration: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, values: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector configuration.

Update the configuration field in the connector document.
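A values-only sketch; the configuration keys are connector-specific, and the ones shown are purely illustrative:

    resp = client.connector.update_configuration(
        connector_id="my-connector",
        values={"host": "db.example.com", "port": 5432},
    )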

``_ :param connector_id: The unique identifier of the connector to be updated :param configuration: :param values: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_configuration' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if configuration is not None: __body["configuration"] = configuration if values is not None: __body["values"] = values __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_configuration", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("error",), ) @_stability_warning(Stability.EXPERIMENTAL) def update_error( self, *, connector_id: str, error: t.Optional[t.Union[None, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector error field.

Set the error field for the connector. If the error provided in the request body is non-null, the connector’s status is updated to error. Otherwise, if the error is reset to null, the connector status is updated to connected.
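A sketch for setting and then clearing the error; note that this client method rejects a bare error=None, so the reset goes through the raw request body (values are placeholders):

    client.connector.update_error(connector_id="my-connector", error="Authentication failed")
    client.connector.update_error(connector_id="my-connector", body={"error": None})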

``_ :param connector_id: The unique identifier of the connector to be updated :param error: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if error is None and body is None: raise ValueError("Empty value passed for parameter 'error'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_error' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if error is not None: __body["error"] = error __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_error", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("features",), ) @_stability_warning(Stability.EXPERIMENTAL) def update_features( self, *, connector_id: str, features: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector features. Update the connector features in the connector document. This API can be used to control the following aspects of a connector:

  • document-level security
  • incremental syncs
  • advanced sync rules
  • basic sync rules

Normally, the running connector service automatically manages these features. However, you can use this API to override the default behavior.

To sync data using self-managed connectors, you need to deploy the Elastic connector service on your own infrastructure. This service runs automatically on Elastic Cloud for Elastic managed connectors.
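A hedged sketch; the shape of the features mapping (the feature name and enabled flag) follows the connector features docs and is illustrative here:

    resp = client.connector.update_features(
        connector_id="my-connector",
        features={"document_level_security": {"enabled": True}},
    )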

``_ :param connector_id: The unique identifier of the connector to be updated. :param features: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if features is None and body is None: raise ValueError("Empty value passed for parameter 'features'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_features' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if features is not None: __body["features"] = features __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_features", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("advanced_snippet", "filtering", "rules"), ) @_stability_warning(Stability.BETA) def update_filtering( self, *, connector_id: str, advanced_snippet: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, filtering: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, rules: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector filtering.

Update the draft filtering configuration of a connector and mark the draft validation state as edited. The filtering draft is activated once validated by the running Elastic connector service. The filtering property is used to configure sync rules (both basic and advanced) for a connector.
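A hedged sketch of a basic sync rule update; the rule fields follow the documented basic sync rule shape, and all values are placeholders:

    rule = {
        "id": "exclude-tmp",
        "policy": "exclude",
        "rule": "starts_with",
        "value": "tmp-",
        "field": "name",
        "order": 0,
    }
    resp = client.connector.update_filtering(connector_id="my-connector", rules=[rule])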

``_ :param connector_id: The unique identifier of the connector to be updated :param advanced_snippet: :param filtering: :param rules: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_filtering' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if advanced_snippet is not None: __body["advanced_snippet"] = advanced_snippet if filtering is not None: __body["filtering"] = filtering if rules is not None: __body["rules"] = rules __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_filtering", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("validation",), ) @_stability_warning(Stability.EXPERIMENTAL) def update_filtering_validation( self, *, connector_id: str, validation: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector draft filtering validation.

Update the draft filtering validation info for a connector.

``_ :param connector_id: The unique identifier of the connector to be updated :param validation: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if validation is None and body is None: raise ValueError("Empty value passed for parameter 'validation'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_filtering/_validation' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if validation is not None: __body["validation"] = validation __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_filtering_validation", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("index_name",), ) @_stability_warning(Stability.BETA) def update_index_name( self, *, connector_id: str, index_name: t.Optional[t.Union[None, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector index name.

Update the index_name field of a connector, specifying the index where the data ingested by the connector is stored.

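A minimal sketch (hypothetical connector ID and index name), assuming the `client` instance from the earlier connector examples:

```python
resp = client.connector.update_index_name(
    connector_id="my-connector",
    index_name="search-google-drive",  # index that will receive synced documents
)
```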
``_ :param connector_id: The unique identifier of the connector to be updated :param index_name: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if index_name is None and body is None: raise ValueError("Empty value passed for parameter 'index_name'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_index_name' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if index_name is not None: __body["index_name"] = index_name __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_index_name", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "name"), ) @_stability_warning(Stability.BETA) def update_name( self, *, connector_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector name and description.

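For example (illustrative values; `client` as assumed earlier):

```python
resp = client.connector.update_name(
    connector_id="my-connector",
    name="Google Drive connector",
    description="Syncs shared drives into Elasticsearch",
)
```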
``_ :param connector_id: The unique identifier of the connector to be updated :param description: :param name: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_name' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if name is not None: __body["name"] = name __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_name", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("is_native",), ) @_stability_warning(Stability.BETA) def update_native( self, *, connector_id: str, is_native: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector is_native flag.

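A one-line sketch with illustrative values (`client` as assumed earlier):

```python
resp = client.connector.update_native(connector_id="my-connector", is_native=False)
```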
``_ :param connector_id: The unique identifier of the connector to be updated :param is_native: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if is_native is None and body is None: raise ValueError("Empty value passed for parameter 'is_native'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_native' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if is_native is not None: __body["is_native"] = is_native __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_native", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("pipeline",), ) @_stability_warning(Stability.BETA) def update_pipeline( self, *, connector_id: str, pipeline: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector pipeline.

When you create a new connector, the configuration of an ingest pipeline is populated with default settings.

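A sketch that overrides those defaults; the pipeline name and settings are illustrative, and `client` is the instance assumed earlier:

```python
resp = client.connector.update_pipeline(
    connector_id="my-connector",
    pipeline={
        "name": "search-default-ingestion",  # hypothetical pipeline name
        "extract_binary_content": True,
        "reduce_whitespace": True,
        "run_ml_inference": False,
    },
)
```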
``_ :param connector_id: The unique identifier of the connector to be updated :param pipeline: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if pipeline is None and body is None: raise ValueError("Empty value passed for parameter 'pipeline'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_pipeline' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if pipeline is not None: __body["pipeline"] = pipeline __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_pipeline", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("scheduling",), ) @_stability_warning(Stability.BETA) def update_scheduling( self, *, connector_id: str, scheduling: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector scheduling.

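A sketch enabling a nightly full sync; the cron expression and connector ID are illustrative (`client` as assumed earlier):

```python
resp = client.connector.update_scheduling(
    connector_id="my-connector",
    scheduling={"full": {"enabled": True, "interval": "0 0 0 * * ?"}},
)
```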
``_ :param connector_id: The unique identifier of the connector to be updated :param scheduling: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if scheduling is None and body is None: raise ValueError("Empty value passed for parameter 'scheduling'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_scheduling' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if scheduling is not None: __body["scheduling"] = scheduling __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_scheduling", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service_type",), ) @_stability_warning(Stability.BETA) def update_service_type( self, *, connector_id: str, service_type: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector service type.

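For example (illustrative values; `client` as assumed earlier):

```python
resp = client.connector.update_service_type(
    connector_id="my-connector",
    service_type="google_drive",  # must match a registered connector service type
)
```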
``_ :param connector_id: The unique identifier of the connector to be updated :param service_type: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if service_type is None and body is None: raise ValueError("Empty value passed for parameter 'service_type'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_service_type' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if service_type is not None: __body["service_type"] = service_type __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_service_type", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("status",), ) @_stability_warning(Stability.EXPERIMENTAL) def update_status( self, *, connector_id: str, status: t.Optional[ t.Union[ str, t.Literal[ "configured", "connected", "created", "error", "needs_configuration" ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the connector status.

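A sketch using one of the literal status values accepted by this method (connector ID illustrative; `client` as assumed earlier):

```python
resp = client.connector.update_status(
    connector_id="my-connector",
    status="needs_configuration",
)
```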
``_ :param connector_id: The unique identifier of the connector to be updated :param status: """ if connector_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'connector_id'") if status is None and body is None: raise ValueError("Empty value passed for parameter 'status'") __path_parts: t.Dict[str, str] = {"connector_id": _quote(connector_id)} __path = f'/_connector/{__path_parts["connector_id"]}/_status' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if status is not None: __body["status"] = status __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="connector.update_status", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/dangling_indices.py000066400000000000000000000204531506101734100267130ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class DanglingIndicesClient(NamespacedClient): @_rewrite_parameters() def delete_dangling_index( self, *, index_uuid: str, accept_data_loss: bool, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a dangling index. If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

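A minimal sketch; the endpoint and UUID are hypothetical, and the UUID would normally come from the list dangling indices API:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # illustrative endpoint
resp = client.dangling_indices.delete_dangling_index(
    index_uuid="zmM4e0JtBkeUjiHD-MihPQ",  # hypothetical UUID
    accept_data_loss=True,  # required acknowledgement
)
```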
``_ :param index_uuid: The UUID of the index to delete. Use the get dangling indices API to find the UUID. :param accept_data_loss: This parameter must be set to true to acknowledge that it will no longer be possible to recover data from the dangling index. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index_uuid'") if accept_data_loss is None: raise ValueError("Empty value passed for parameter 'accept_data_loss'") __path_parts: t.Dict[str, str] = {"index_uuid": _quote(index_uuid)} __path = f'/_dangling/{__path_parts["index_uuid"]}' __query: t.Dict[str, t.Any] = {} if accept_data_loss is not None: __query["accept_data_loss"] = accept_data_loss if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="dangling_indices.delete_dangling_index", path_parts=__path_parts, ) @_rewrite_parameters() def import_dangling_index( self, *, index_uuid: str, accept_data_loss: bool, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Import a dangling index.

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

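A minimal sketch (hypothetical UUID; `client` as in the previous example):

```python
resp = client.dangling_indices.import_dangling_index(
    index_uuid="zmM4e0JtBkeUjiHD-MihPQ",  # hypothetical UUID from the list API
    accept_data_loss=True,
)
```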
``_ :param index_uuid: The UUID of the index to import. Use the get dangling indices API to locate the UUID. :param accept_data_loss: This parameter must be set to true to import a dangling index. Because Elasticsearch cannot know where the dangling index data came from or determine which shard copies are fresh and which are stale, it cannot guarantee that the imported data represents the latest state of the index when it was last in the cluster. :param master_timeout: Specify timeout for connection to master :param timeout: Explicit operation timeout """ if index_uuid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index_uuid'") if accept_data_loss is None: raise ValueError("Empty value passed for parameter 'accept_data_loss'") __path_parts: t.Dict[str, str] = {"index_uuid": _quote(index_uuid)} __path = f'/_dangling/{__path_parts["index_uuid"]}' __query: t.Dict[str, t.Any] = {} if accept_data_loss is not None: __query["accept_data_loss"] = accept_data_loss if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="dangling_indices.import_dangling_index", path_parts=__path_parts, ) @_rewrite_parameters() def list_dangling_indices( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the dangling indices.

If Elasticsearch encounters index data that is absent from the current cluster state, those indices are considered to be dangling. For example, this can happen if you delete more than cluster.indices.tombstones.size indices while an Elasticsearch node is offline.

Use this API to list dangling indices, which you can then import or delete.

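A sketch that inspects dangling indices before deciding what to do with them; the response keys follow the dangling indices API documentation (`client` as assumed above):

```python
resp = client.dangling_indices.list_dangling_indices()
for idx in resp["dangling_indices"]:
    print(idx["index_name"], idx["index_uuid"])
```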
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_dangling" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="dangling_indices.list_dangling_indices", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/enrich.py000066400000000000000000000257421506101734100247100ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class EnrichClient(NamespacedClient): @_rewrite_parameters() def delete_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an enrich policy. Deletes an existing enrich policy and its enrich index.

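A minimal sketch; the endpoint and policy name are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # illustrative endpoint
resp = client.enrich.delete_policy(name="users-policy")  # hypothetical policy
```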
``_ :param name: Enrich policy to delete. :param master_timeout: Period to wait for a connection to the master node. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="enrich.delete_policy", path_parts=__path_parts, ) @_rewrite_parameters() def execute_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an enrich policy. Create the enrich index for an existing enrich policy.

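For example, blocking until the enrich index is built (policy name hypothetical; `client` as above):

```python
resp = client.enrich.execute_policy(name="users-policy", wait_for_completion=True)
```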
``_ :param name: Enrich policy to execute. :param master_timeout: Period to wait for a connection to the master node. :param wait_for_completion: If `true`, the request blocks other enrich policy execution requests until complete. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}/_execute' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="enrich.execute_policy", path_parts=__path_parts, ) @_rewrite_parameters() def get_policy( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get an enrich policy. Returns information about an enrich policy.

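A sketch that lists every policy by omitting `name`; the `policies` response key follows the enrich API documentation (`client` as above):

```python
resp = client.enrich.get_policy()
for policy in resp["policies"]:
    print(policy)
```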
``_ :param name: Comma-separated list of enrich policy names used to limit the request. To return information for all enrich policies, omit this parameter. :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}' else: __path_parts = {} __path = "/_enrich/policy" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="enrich.get_policy", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("geo_match", "match", "range"), ) def put_policy( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, geo_match: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, match: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, range: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an enrich policy.

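A sketch of a `match`-type policy; the source index, field names, and policy name are illustrative (`client` as above):

```python
resp = client.enrich.put_policy(
    name="users-policy",
    match={
        "indices": "users",
        "match_field": "email",
        "enrich_fields": ["first_name", "last_name", "city"],
    },
)
```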
``_ :param name: Name of the enrich policy to create or update. :param geo_match: Matches enrich data to incoming documents based on a `geo_shape` query. :param master_timeout: Period to wait for a connection to the master node. :param match: Matches enrich data to incoming documents based on a `term` query. :param range: Matches a number, date, or IP address in incoming documents to a range in the enrich index based on a `term` query. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_enrich/policy/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if geo_match is not None: __body["geo_match"] = geo_match if match is not None: __body["match"] = match if range is not None: __body["range"] = range __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="enrich.put_policy", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get enrich stats. Returns enrich coordinator statistics and information about enrich policies that are currently executing.

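For example (`client` as above; the `coordinator_stats` key follows the enrich stats API documentation):

```python
resp = client.enrich.stats()
print(resp["coordinator_stats"])
```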
``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_enrich/_stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="enrich.stats", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/eql.py000066400000000000000000000370141506101734100242140ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class EqlClient(NamespacedClient): @_rewrite_parameters() def delete( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async EQL search. Delete an async EQL search or a stored synchronous EQL search. The API also deletes results for the search.

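A minimal sketch; the endpoint and search ID are hypothetical:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # illustrative endpoint
resp = client.eql.delete(id="my-search-id")  # hypothetical async search ID
```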
``_ :param id: Identifier for the search to delete. A search ID is provided in the EQL search API's response for an async search. A search ID is also provided if the request’s `keep_on_completion` parameter is `true`. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_eql/search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="eql.delete", path_parts=__path_parts, ) @_rewrite_parameters() def get( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async EQL search results. Get the current status and available results for an async EQL search or a stored synchronous EQL search.

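For example, extending the result retention while polling (search ID hypothetical; `client` as above):

```python
resp = client.eql.get(id="my-search-id", keep_alive="1d")
```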
``_ :param id: Identifier for the search. :param keep_alive: Period for which the search and its results are stored on the cluster. Defaults to the keep_alive value set by the search’s EQL search API request. :param wait_for_completion_timeout: Timeout duration to wait for the request to finish. Defaults to no timeout, meaning the request waits for complete search results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_eql/search/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="eql.get", path_parts=__path_parts, ) @_rewrite_parameters() def get_status( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the async EQL status. Get the current status for an async EQL search or a stored synchronous EQL search without returning results.

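A sketch that polls without pulling results (search ID hypothetical; `client` as above); `is_running` and `is_partial` follow the EQL status API documentation:

```python
resp = client.eql.get_status(id="my-search-id")
print(resp["is_running"], resp["is_partial"])
```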
``_ :param id: Identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_eql/search/status/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="eql.get_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "query", "allow_partial_search_results", "allow_partial_sequence_results", "case_sensitive", "event_category_field", "fetch_size", "fields", "filter", "keep_alive", "keep_on_completion", "max_samples_per_key", "result_position", "runtime_mappings", "size", "tiebreaker_field", "timestamp_field", "wait_for_completion_timeout", ), ) def search( self, *, index: t.Union[str, t.Sequence[str]], query: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, allow_partial_sequence_results: t.Optional[bool] = None, case_sensitive: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, event_category_field: t.Optional[str] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fetch_size: t.Optional[int] = None, fields: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, filter: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, max_samples_per_key: t.Optional[int] = None, pretty: t.Optional[bool] = None, result_position: t.Optional[t.Union[str, t.Literal["head", "tail"]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, size: t.Optional[int] = None, tiebreaker_field: t.Optional[str] = None, timestamp_field: t.Optional[str] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get EQL search results. Returns search results for an Event Query Language (EQL) query. EQL assumes each document in a data stream or index corresponds to an event.

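A sketch of a basic event query; the index name and query string are illustrative, and the `hits.events` response shape follows the EQL search documentation (`client` as above):

```python
resp = client.eql.search(
    index="my-data-stream",
    query='process where process.name == "regsvr32.exe"',
)
for event in resp["hits"]["events"]:
    print(event["_source"])
```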
``_ :param index: The name of the index to scope the operation :param query: EQL query you wish to run. :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param allow_partial_search_results: Allow query execution also in case of shard failures. If true, the query will keep running and will return results based on the available shards. For sequences, the behavior can be further refined using allow_partial_sequence_results :param allow_partial_sequence_results: This flag applies only to sequences and has effect only if allow_partial_search_results=true. If true, the sequence query will return results based on the available shards, ignoring the others. If false, the sequence query will return successfully, but will always have empty results. :param case_sensitive: :param ccs_minimize_roundtrips: Indicates whether network round-trips should be minimized as part of cross-cluster search requests execution :param event_category_field: Field containing the event classification, such as process, file, or network. :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param fetch_size: Maximum number of events to search at a time for sequence queries. :param fields: Array of wildcard (*) patterns. The response returns values for field names matching these patterns in the fields property of each hit. :param filter: Query, written in Query DSL, used to filter the events on which the EQL query runs. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param keep_alive: :param keep_on_completion: :param max_samples_per_key: By default, the response of a sample query contains up to `10` samples, with one sample per unique set of join keys. Use the `size` parameter to get a smaller or larger set of samples. To retrieve more than one sample per set of join keys, use the `max_samples_per_key` parameter. Pipes are not supported for sample queries. :param result_position: :param runtime_mappings: :param size: For basic queries, the maximum number of matching events to return. Defaults to 10 :param tiebreaker_field: Field used to sort hits with the same timestamp in ascending order :param timestamp_field: Field containing event timestamp. 
Default "@timestamp" :param wait_for_completion_timeout: """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_eql/search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = query if allow_partial_search_results is not None: __body["allow_partial_search_results"] = allow_partial_search_results if allow_partial_sequence_results is not None: __body["allow_partial_sequence_results"] = ( allow_partial_sequence_results ) if case_sensitive is not None: __body["case_sensitive"] = case_sensitive if event_category_field is not None: __body["event_category_field"] = event_category_field if fetch_size is not None: __body["fetch_size"] = fetch_size if fields is not None: __body["fields"] = fields if filter is not None: __body["filter"] = filter if keep_alive is not None: __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion if max_samples_per_key is not None: __body["max_samples_per_key"] = max_samples_per_key if result_position is not None: __body["result_position"] = result_position if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if size is not None: __body["size"] = size if tiebreaker_field is not None: __body["tiebreaker_field"] = tiebreaker_field if timestamp_field is not None: __body["timestamp_field"] = timestamp_field if wait_for_completion_timeout is not None: __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="eql.search", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/esql.py000066400000000000000000000665231506101734100244060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) if t.TYPE_CHECKING: from elasticsearch.esql import ESQLBase class EsqlClient(NamespacedClient): @_rewrite_parameters( body_fields=( "query", "columnar", "filter", "include_ccs_metadata", "keep_alive", "keep_on_completion", "locale", "params", "profile", "tables", "wait_for_completion_timeout", ), ignore_deprecated_options={"params"}, ) def async_query( self, *, query: t.Optional[t.Union[str, "ESQLBase"]] = None, allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal[ "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" ], ] ] = None, human: t.Optional[bool] = None, include_ccs_metadata: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[t.Sequence[t.Union[None, bool, float, int, str]]] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, tables: t.Optional[ t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an async ES|QL query. Asynchronously run an ES|QL (Elasticsearch query language) query, monitor its progress, and retrieve results when they become available.

The API accepts the same parameters and request body as the synchronous query API, along with additional async-related properties.

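A sketch that submits an async query and keeps it around for later retrieval; the endpoint and ES|QL string are illustrative:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # illustrative endpoint
resp = client.esql.async_query(
    query="FROM library | STATS count = COUNT(*) BY author | SORT author",
    wait_for_completion_timeout="2s",
    keep_on_completion=True,
)
query_id = resp.body.get("id")  # set when the query is still running or stored
```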
``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures. To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. :param delimiter: The character to use between values within a CSV row. It is valid only for the CSV format. :param drop_null_columns: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, e.g. json, yaml. `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. For async requests, nothing will be returned if the async query doesn't finish within the timeout. The query ID and running status are available in the `X-Elasticsearch-Async-Id` and `X-Elasticsearch-Async-Is-Running` HTTP headers of the response, respectively. :param include_ccs_metadata: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. :param keep_alive: The period for which the query and its results are stored in the cluster. The default period is five days. When this period expires, the query and its results are deleted, even if the query is still ongoing. If the `keep_on_completion` parameter is false, Elasticsearch only stores async queries that do not complete within the period set by the `wait_for_completion_timeout` parameter, regardless of this value. :param keep_on_completion: Indicates whether the query and its results are stored in the cluster. If false, the query and its results are stored in the cluster only if the request does not complete during the period set by the `wait_for_completion_timeout` parameter. :param locale: :param params: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. :param profile: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. :param tables: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. :param wait_for_completion_timeout: The period to wait for the request to finish. By default, the request waits for 1 second for the query results. 
If the query completes during this period, results are returned. Otherwise, a query ID is returned that can later be used to retrieve the results. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {} __path = "/_query/async" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_partial_results is not None: __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = str(query) if columnar is not None: __body["columnar"] = columnar if filter is not None: __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata if keep_alive is not None: __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion if locale is not None: __body["locale"] = locale if params is not None: __body["params"] = params if profile is not None: __body["profile"] = profile if tables is not None: __body["tables"] = tables if wait_for_completion_timeout is not None: __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="esql.async_query", path_parts=__path_parts, ) @_rewrite_parameters() def async_query_delete( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async ES|QL query. If the query is still running, it is cancelled. Otherwise, the stored results are deleted.

If the Elasticsearch security features are enabled, only the following users can use this API to delete a query:

  • The authenticated user that submitted the original query request
  • Users with the cancel_task cluster privilege
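A minimal deletion sketch (hypothetical query ID; `client` as above):

```python
resp = client.esql.async_query_delete(id="my-query-id")  # hypothetical query ID
```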
``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/async/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="esql.async_query_delete", path_parts=__path_parts, ) @_rewrite_parameters() def async_query_get( self, *, id: str, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal[ "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" ], ] ] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async ES|QL query results. Get the current status and available results or stored results for an ES|QL asynchronous query. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can retrieve the results using this API.

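A polling sketch (hypothetical query ID; `client` as above); the `is_running` and `values` keys follow the async query API response documentation:

```python
resp = client.esql.async_query_get(id="my-query-id", wait_for_completion_timeout="2s")
if not resp["is_running"]:
    print(resp["values"])
```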
``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. :param drop_null_columns: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. :param format: A short version of the Accept header, for example `json` or `yaml`. :param keep_alive: The period for which the query and its results are stored in the cluster. When this period expires, the query and its results are deleted, even if the query is still ongoing. :param wait_for_completion_timeout: The period to wait for the request to finish. By default, the request waits for complete query results. If the request completes during the period specified in this parameter, complete query results are returned. Otherwise, the response returns an `is_running` value of `true` and no results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/async/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="esql.async_query_get", path_parts=__path_parts, ) @_rewrite_parameters() def async_query_stop( self, *, id: str, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop async ES|QL query.

This API interrupts the query execution and returns the results so far. If the Elasticsearch security features are enabled, only the user who first submitted the ES|QL query can stop it.

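For example (hypothetical query ID; `client` as above):

```python
resp = client.esql.async_query_stop(id="my-query-id")  # returns the results so far
```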
``_ :param id: The unique identifier of the query. A query ID is provided in the ES|QL async query API response for a query that does not complete in the designated time. A query ID is also provided when the request was submitted with the `keep_on_completion` parameter set to `true`. :param drop_null_columns: Indicates whether columns that are entirely `null` will be removed from the `columns` and `values` portion of the results. If `true`, the response will include an extra section under the name `all_columns` which has the name of all the columns. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/async/{__path_parts["id"]}/stop' __query: t.Dict[str, t.Any] = {} if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="esql.async_query_stop", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_query( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get information about a specific running ES|QL query. Returns an object with extended information about a running ES|QL query.

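A minimal sketch (hypothetical query ID; `client` as above):

```python
resp = client.esql.get_query(id="my-query-id")
print(resp)
```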
``_ :param id: The query ID """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_query/queries/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="esql.get_query", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def list_queries( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get information about running ES|QL queries. Returns an object containing IDs and other information about the running ES|QL queries.

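For example (`client` as above):

```python
resp = client.esql.list_queries()
print(resp)
```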
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_query/queries" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="esql.list_queries", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "query", "columnar", "filter", "include_ccs_metadata", "locale", "params", "profile", "tables", ), ignore_deprecated_options={"params"}, ) def query( self, *, query: t.Optional[t.Union[str, "ESQLBase"]] = None, allow_partial_results: t.Optional[bool] = None, columnar: t.Optional[bool] = None, delimiter: t.Optional[str] = None, drop_null_columns: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal[ "arrow", "cbor", "csv", "json", "smile", "tsv", "txt", "yaml" ], ] ] = None, human: t.Optional[bool] = None, include_ccs_metadata: t.Optional[bool] = None, locale: t.Optional[str] = None, params: t.Optional[t.Sequence[t.Union[None, bool, float, int, str]]] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, tables: t.Optional[ t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run an ES|QL query. Get search results for an ES|QL (Elasticsearch query language) query.

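A synchronous sketch; the ES|QL string is illustrative, and the `columns`/`values` response shape follows the ES|QL documentation (`client` as above):

```python
resp = client.esql.query(query="FROM library | WHERE page_count > 300 | LIMIT 5")
print([col["name"] for col in resp["columns"]])
for row in resp["values"]:
    print(row)
```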
``_ :param query: The ES|QL query API accepts an ES|QL query string in the query parameter, runs it, and returns the results. :param allow_partial_results: If `true`, partial results will be returned if there are shard failures, but the query can continue to execute on other clusters and shards. If `false`, the query will fail if there are any failures. To override the default behavior, you can set the `esql.query.allow_partial_results` cluster setting to `false`. :param columnar: By default, ES|QL returns results as rows. For example, FROM returns each individual document as one row. For the JSON, YAML, CBOR and smile formats, ES|QL can return the results in a columnar fashion where one row represents all the values of a certain column in the results. :param delimiter: The character to use between values within a CSV row. Only valid for the CSV format. :param drop_null_columns: Should columns that are entirely `null` be removed from the `columns` and `values` portion of the results? Defaults to `false`. If `true` then the response will include an extra section under the name `all_columns` which has the name of all columns. :param filter: Specify a Query DSL query in the filter parameter to filter the set of documents that an ES|QL query runs on. :param format: A short version of the Accept header, e.g. json, yaml. `csv`, `tsv`, and `txt` formats will return results in a tabular format, excluding other metadata fields from the response. :param include_ccs_metadata: When set to `true` and performing a cross-cluster query, the response will include an extra `_clusters` object with information about the clusters that participated in the search along with info such as shards count. :param locale: :param params: To avoid any attempts of hacking or code injection, extract the values in a separate list of parameters. Use question mark placeholders (?) in the query string for each of the parameters. :param profile: If provided and `true` the response will include an extra `profile` object with information on how the query was executed. This information is for human debugging and its format can change at any time but it can give some insight into the performance of each part of the query. :param tables: Tables to use with the LOOKUP operation. The top level key is the table name and the next level key is the column name. 
""" if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {} __path = "/_query" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_partial_results is not None: __query["allow_partial_results"] = allow_partial_results if delimiter is not None: __query["delimiter"] = delimiter if drop_null_columns is not None: __query["drop_null_columns"] = drop_null_columns if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = str(query) if columnar is not None: __body["columnar"] = columnar if filter is not None: __body["filter"] = filter if include_ccs_metadata is not None: __body["include_ccs_metadata"] = include_ccs_metadata if locale is not None: __body["locale"] = locale if params is not None: __body["params"] = params if profile is not None: __body["profile"] = profile if tables is not None: __body["tables"] = tables __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="esql.query", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/features.py000066400000000000000000000140341506101734100252460ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import Stability, _rewrite_parameters, _stability_warning class FeaturesClient(NamespacedClient): @_rewrite_parameters() def get_features( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the features. Get a list of features that can be included in snapshots using the feature_states field when creating a snapshot. You can use this API to determine which feature states to include when taking a snapshot. By default, all feature states are included in a snapshot if that snapshot includes the global state, or none if it does not.

A feature state includes one or more system indices necessary for a given feature to function. In order to ensure data integrity, all system indices that comprise a feature state are snapshotted and restored together.

The features listed by this API are a combination of built-in features and features defined by plugins. In order for a feature state to be listed in this API and recognized as a valid feature state by the create snapshot API, the plugin that defines that feature must be installed on the master node.

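        A minimal usage sketch (assumes a connected ``Elasticsearch`` client
        named ``client``; the response layout shown is abbreviated)::

            resp = client.features.get_features()
            for feature in resp["features"]:
                print(feature["name"])
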
``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="features.get_features", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def reset_features( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reset the features. Clear all of the state information stored in system indices by Elasticsearch features, including the security and machine learning indices.

WARNING: Intended for development and testing use only. Do not reset features on a production cluster.

Return a cluster to the same state as a new installation by resetting the feature state for all Elasticsearch features. This deletes all state information stored in system indices.

The response code is HTTP 200 if the state is successfully reset for all features. It is HTTP 500 if the reset operation failed for any feature.

Note that select features might provide a way to reset particular system indices. Using this API resets all features, both those that are built-in and implemented as plugins.

To list the features that will be affected, use the get features API.

IMPORTANT: The features installed on the node you submit this request to are the features that will be reset. Run on the master node if you have any doubts about which plugins are installed on individual nodes.

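        For example, on a disposable test cluster (assumes a connected
        ``Elasticsearch`` client named ``client``; never run this against
        production data)::

            resp = client.features.reset_features()
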
``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_features/_reset" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="features.reset_features", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/fleet.py000066400000000000000000000745211506101734100245360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class FleetClient(NamespacedClient): @_rewrite_parameters() def global_checkpoints( self, *, index: str, checkpoints: t.Optional[t.Sequence[int]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_advance: t.Optional[bool] = None, wait_for_index: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get global checkpoints.

Get the current global checkpoints for an index. This API is designed for internal use by the Fleet server project.

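        Hedged sketch (assumes a connected ``Elasticsearch`` client named
        ``client``; the index name and checkpoint value are placeholders)::

            resp = client.fleet.global_checkpoints(
                index="my-index",
                wait_for_advance=True,
                checkpoints=[0],
                timeout="30s",
            )
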
``_ :param index: A single index or index alias that resolves to a single index. :param checkpoints: A comma separated list of previous global checkpoints. When used in combination with `wait_for_advance`, the API will only return once the global checkpoints advances past the checkpoints. Providing an empty list will cause Elasticsearch to immediately return the current global checkpoints. :param timeout: Period to wait for a global checkpoints to advance past `checkpoints`. :param wait_for_advance: A boolean value which controls whether to wait (until the timeout) for the global checkpoints to advance past the provided `checkpoints`. :param wait_for_index: A boolean value which controls whether to wait (until the timeout) for the target index to exist and all primary shards be active. Can only be true when `wait_for_advance` is true. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_fleet/global_checkpoints' __query: t.Dict[str, t.Any] = {} if checkpoints is not None: __query["checkpoints"] = checkpoints if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_advance is not None: __query["wait_for_advance"] = wait_for_advance if wait_for_index is not None: __query["wait_for_index"] = wait_for_index __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="fleet.global_checkpoints", path_parts=__path_parts, ) @_rewrite_parameters( body_name="searches", ) @_stability_warning(Stability.EXPERIMENTAL) def msearch( self, *, searches: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[str] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, max_concurrent_searches: t.Optional[int] = None, max_concurrent_shard_requests: t.Optional[int] = None, pre_filter_shard_size: t.Optional[int] = None, pretty: t.Optional[bool] = None, rest_total_hits_as_int: t.Optional[bool] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, typed_keys: t.Optional[bool] = None, wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run multiple Fleet searches. Run several Fleet searches with a single API request. The API follows the same structure as the multi search API. However, similar to the Fleet search API, it supports the wait_for_checkpoints parameter.

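        Illustrative sketch (assumes a connected ``Elasticsearch`` client
        named ``client``; ``my-index`` is a placeholder). As with the multi
        search API, ``searches`` alternates header and body entries::

            resp = client.fleet.msearch(
                index="my-index",
                searches=[
                    {},  # header for the first sub-search
                    {"query": {"match_all": {}}},
                ],
            )
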
``_ :param searches: :param index: A single target to search. If the target is an index alias, it must resolve to a single index. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param allow_partial_search_results: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. :param ccs_minimize_roundtrips: If true, network roundtrips between the coordinating node and remote clusters are minimized for cross-cluster search requests. :param expand_wildcards: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. :param ignore_throttled: If true, concrete, expanded or aliased indices are ignored when frozen. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param max_concurrent_searches: Maximum number of concurrent searches the multi search API can execute. :param max_concurrent_shard_requests: Maximum number of concurrent shard requests that each sub-search request executes per node. :param pre_filter_shard_size: Defines a threshold that enforces a pre-filter roundtrip to prefilter search shards based on query rewriting if the number of shards the search request expands to exceeds the threshold. This filter roundtrip can limit the number of shards significantly if for instance a shard can not match any documents based on its rewrite method i.e., if date filters are mandatory to match but the shard bounds and the query are disjoint. :param rest_total_hits_as_int: If true, hits.total are returned as an integer in the response. Defaults to false, which returns an object. :param search_type: Indicates whether global term and document frequencies should be used when scoring returned documents. :param typed_keys: Specifies whether aggregation and suggester names should be prefixed by their respective types in the response. :param wait_for_checkpoints: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. """ if searches is None and body is None: raise ValueError( "Empty value passed for parameters 'searches' and 'body', one of them should be set." 
) elif searches is not None and body is not None: raise ValueError("Cannot set both 'searches' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_fleet/_fleet_msearch' else: __path_parts = {} __path = "/_fleet/_fleet_msearch" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if max_concurrent_searches is not None: __query["max_concurrent_searches"] = max_concurrent_searches if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if search_type is not None: __query["search_type"] = search_type if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_checkpoints is not None: __query["wait_for_checkpoints"] = wait_for_checkpoints __body = searches if searches is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="fleet.msearch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "collapse", "docvalue_fields", "explain", "ext", "fields", "from_", "highlight", "indices_boost", "min_score", "pit", "post_filter", "profile", "query", "rescore", "runtime_mappings", "script_fields", "search_after", "seq_no_primary_term", "size", "slice", "sort", "source", "stats", "stored_fields", "suggest", "terminate_after", "timeout", "track_scores", "track_total_hits", "version", ), parameter_aliases={ "_source": "source", "_source_excludes": "source_excludes", "_source_includes": "source_includes", "from": "from_", }, ) @_stability_warning(Stability.EXPERIMENTAL) def search( self, *, index: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, allow_partial_search_results: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, batched_reduce_size: t.Optional[int] = None, ccs_minimize_roundtrips: t.Optional[bool] = None, collapse: t.Optional[t.Mapping[str, t.Any]] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, docvalue_fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, 
t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, ext: t.Optional[t.Mapping[str, t.Any]] = None, fields: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, highlight: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indices_boost: t.Optional[t.Sequence[t.Mapping[str, float]]] = None, lenient: t.Optional[bool] = None, max_concurrent_shard_requests: t.Optional[int] = None, min_score: t.Optional[float] = None, pit: t.Optional[t.Mapping[str, t.Any]] = None, post_filter: t.Optional[t.Mapping[str, t.Any]] = None, pre_filter_shard_size: t.Optional[int] = None, preference: t.Optional[str] = None, pretty: t.Optional[bool] = None, profile: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, request_cache: t.Optional[bool] = None, rescore: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, rest_total_hits_as_int: t.Optional[bool] = None, routing: t.Optional[str] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, search_type: t.Optional[ t.Union[str, t.Literal["dfs_query_then_fetch", "query_then_fetch"]] ] = None, seq_no_primary_term: t.Optional[bool] = None, size: t.Optional[int] = None, slice: t.Optional[t.Mapping[str, t.Any]] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, source: t.Optional[t.Union[bool, t.Mapping[str, t.Any]]] = None, source_excludes: t.Optional[t.Union[str, t.Sequence[str]]] = None, source_includes: t.Optional[t.Union[str, t.Sequence[str]]] = None, stats: t.Optional[t.Sequence[str]] = None, stored_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, suggest: t.Optional[t.Mapping[str, t.Any]] = None, suggest_field: t.Optional[str] = None, suggest_mode: t.Optional[ t.Union[str, t.Literal["always", "missing", "popular"]] ] = None, suggest_size: t.Optional[int] = None, suggest_text: t.Optional[str] = None, terminate_after: t.Optional[int] = None, timeout: t.Optional[str] = None, track_scores: t.Optional[bool] = None, track_total_hits: t.Optional[t.Union[bool, int]] = None, typed_keys: t.Optional[bool] = None, version: t.Optional[bool] = None, wait_for_checkpoints: t.Optional[t.Sequence[int]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a Fleet search. The purpose of the Fleet search API is to provide an API where the search will be run only after the provided checkpoint has been processed and is visible for searches inside of Elasticsearch.

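        A usage sketch under stated assumptions (a connected ``Elasticsearch``
        client named ``client``; the index name and checkpoint are
        placeholders)::

            resp = client.fleet.search(
                index="my-index",
                query={"match_all": {}},
                wait_for_checkpoints=[42],
            )
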
``_ :param index: A single target to search. If the target is an index alias, it must resolve to a single index. :param aggregations: :param aggs: :param allow_no_indices: :param allow_partial_search_results: If true, returns partial results if there are shard request timeouts or shard failures. If false, returns an error with no partial results. Defaults to the configured cluster setting `search.default_allow_partial_results`, which is true by default. :param analyze_wildcard: :param analyzer: :param batched_reduce_size: :param ccs_minimize_roundtrips: :param collapse: :param default_operator: :param df: :param docvalue_fields: Array of wildcard (*) patterns. The request returns doc values for field names matching these patterns in the hits.fields property of the response. :param expand_wildcards: :param explain: If true, returns detailed information about score computation as part of a hit. :param ext: Configuration of search extensions defined by Elasticsearch plugins. :param fields: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. :param from_: Starting document offset. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param highlight: :param ignore_throttled: :param ignore_unavailable: :param indices_boost: Boosts the _score of documents from specified indices. :param lenient: :param max_concurrent_shard_requests: :param min_score: Minimum _score for matching documents. Documents with a lower _score are not included in search results and results collected by aggregations. :param pit: Limits the search to a point in time (PIT). If you provide a PIT, you cannot specify an index in the request path. :param post_filter: :param pre_filter_shard_size: :param preference: :param profile: :param q: :param query: Defines the search definition using the Query DSL. :param request_cache: :param rescore: :param rest_total_hits_as_int: :param routing: :param runtime_mappings: Defines one or more runtime fields in the search request. These fields take precedence over mapped fields with the same name. :param script_fields: Retrieve a script evaluation (based on different fields) for each hit. :param scroll: :param search_after: :param search_type: :param seq_no_primary_term: If true, returns sequence number and primary term of the last modification of each hit. See Optimistic concurrency control. :param size: The number of hits to return. By default, you cannot page through more than 10,000 hits using the from and size parameters. To page through more hits, use the search_after parameter. :param slice: :param sort: :param source: Indicates which source fields are returned for matching documents. These fields are returned in the hits._source property of the search response. :param source_excludes: :param source_includes: :param stats: Stats groups to associate with the search. Each group maintains a statistics aggregation for its associated searches. You can retrieve these stats using the indices stats API. :param stored_fields: List of stored fields to return as part of a hit. If no fields are specified, no stored fields are included in the response. If this field is specified, the _source parameter defaults to false. You can pass _source: true to return both source fields and stored fields in the search response. :param suggest: :param suggest_field: Specifies which field to use for suggestions.
:param suggest_mode: :param suggest_size: :param suggest_text: The source text for which the suggestions should be returned. :param terminate_after: Maximum number of documents to collect for each shard. If a query reaches this limit, Elasticsearch terminates the query early. Elasticsearch collects documents before sorting. Defaults to 0, which does not terminate query execution early. :param timeout: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param track_scores: If true, calculate and return document scores, even if the scores are not used for sorting. :param track_total_hits: Number of hits matching the query to count accurately. If true, the exact number of hits is returned at the cost of some performance. If false, the response does not include the total number of hits matching the query. Defaults to 10,000 hits. :param typed_keys: :param version: If true, returns document version as part of a hit. :param wait_for_checkpoints: A comma separated list of checkpoints. When configured, the search API will only be executed on a shard after the relevant checkpoint has become visible for search. Defaults to an empty list which will cause Elasticsearch to immediately execute the search. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_fleet/_fleet_search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if allow_partial_search_results is not None: __query["allow_partial_search_results"] = allow_partial_search_results if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if batched_reduce_size is not None: __query["batched_reduce_size"] = batched_reduce_size if ccs_minimize_roundtrips is not None: __query["ccs_minimize_roundtrips"] = ccs_minimize_roundtrips if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if max_concurrent_shard_requests is not None: __query["max_concurrent_shard_requests"] = max_concurrent_shard_requests if pre_filter_shard_size is not None: __query["pre_filter_shard_size"] = pre_filter_shard_size if preference is not None: __query["preference"] = preference if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if request_cache is not None: __query["request_cache"] = request_cache if rest_total_hits_as_int is not None: 
__query["rest_total_hits_as_int"] = rest_total_hits_as_int if routing is not None: __query["routing"] = routing if scroll is not None: __query["scroll"] = scroll if search_type is not None: __query["search_type"] = search_type if source_excludes is not None: __query["_source_excludes"] = source_excludes if source_includes is not None: __query["_source_includes"] = source_includes if suggest_field is not None: __query["suggest_field"] = suggest_field if suggest_mode is not None: __query["suggest_mode"] = suggest_mode if suggest_size is not None: __query["suggest_size"] = suggest_size if suggest_text is not None: __query["suggest_text"] = suggest_text if typed_keys is not None: __query["typed_keys"] = typed_keys if wait_for_checkpoints is not None: __query["wait_for_checkpoints"] = wait_for_checkpoints if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if collapse is not None: __body["collapse"] = collapse if docvalue_fields is not None: __body["docvalue_fields"] = docvalue_fields if explain is not None: __body["explain"] = explain if ext is not None: __body["ext"] = ext if fields is not None: __body["fields"] = fields if from_ is not None: __body["from"] = from_ if highlight is not None: __body["highlight"] = highlight if indices_boost is not None: __body["indices_boost"] = indices_boost if min_score is not None: __body["min_score"] = min_score if pit is not None: __body["pit"] = pit if post_filter is not None: __body["post_filter"] = post_filter if profile is not None: __body["profile"] = profile if query is not None: __body["query"] = query if rescore is not None: __body["rescore"] = rescore if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if search_after is not None: __body["search_after"] = search_after if seq_no_primary_term is not None: __body["seq_no_primary_term"] = seq_no_primary_term if size is not None: __body["size"] = size if slice is not None: __body["slice"] = slice if sort is not None: __body["sort"] = sort if source is not None: __body["_source"] = source if stats is not None: __body["stats"] = stats if stored_fields is not None: __body["stored_fields"] = stored_fields if suggest is not None: __body["suggest"] = suggest if terminate_after is not None: __body["terminate_after"] = terminate_after if timeout is not None: __body["timeout"] = timeout if track_scores is not None: __body["track_scores"] = track_scores if track_total_hits is not None: __body["track_total_hits"] = track_total_hits if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="fleet.search", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/graph.py000066400000000000000000000120041506101734100245240ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class GraphClient(NamespacedClient): @_rewrite_parameters( body_fields=("connections", "controls", "query", "vertices"), ) def explore( self, *, index: t.Union[str, t.Sequence[str]], connections: t.Optional[t.Mapping[str, t.Any]] = None, controls: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, vertices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explore graph analytics. Extract and summarize information about the documents and terms in an Elasticsearch data stream or index. The easiest way to understand the behavior of this API is to use the Graph UI to explore connections. An initial request to the _explore API contains a seed query that identifies the documents of interest and specifies the fields that define the vertices and connections you want to include in the graph. Subsequent requests enable you to spider out from one or more vertices of interest. You can exclude vertices that have already been returned.

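        Hedged example (assumes a connected ``Elasticsearch`` client named
        ``client``; the index and field names are placeholders)::

            resp = client.graph.explore(
                index="my-index",
                query={"match": {"category": "electronics"}},
                vertices=[{"field": "product_id"}],
                connections={"vertices": [{"field": "customer_id"}]},
            )
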
``_ :param index: Name of the index. :param connections: Specifies one or more fields from which you want to extract terms that are associated with the specified vertices. :param controls: Direct the Graph API how to build the graph. :param query: A seed query that identifies the documents of interest. Can be any valid Elasticsearch query. :param routing: Custom value used to route operations to a specific shard. :param timeout: Specifies the period of time to wait for a response from each shard. If no response is received before the timeout expires, the request fails and returns an error. Defaults to no timeout. :param vertices: Specifies one or more fields that contain the terms you want to include in the graph as vertices. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_graph/explore' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if routing is not None: __query["routing"] = routing if timeout is not None: __query["timeout"] = timeout if not __body: if connections is not None: __body["connections"] = connections if controls is not None: __body["controls"] = controls if query is not None: __body["query"] = query if vertices is not None: __body["vertices"] = vertices if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="graph.explore", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/ilm.py000066400000000000000000000675021506101734100242110ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class IlmClient(NamespacedClient): @_rewrite_parameters() def delete_lifecycle( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a lifecycle policy. You cannot delete policies that are currently in use. If the policy is being used to manage any indices, the request fails and returns an error.

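        For example (assuming a connected ``Elasticsearch`` client named
        ``client`` and a placeholder policy name)::

            resp = client.ilm.delete_lifecycle(name="my-policy")
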
``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"policy": _quote(name)} __path = f'/_ilm/policy/{__path_parts["policy"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ilm.delete_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def explain_lifecycle( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, only_errors: t.Optional[bool] = None, only_managed: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain the lifecycle state. Get the current lifecycle status for one or more indices. For data streams, the API retrieves the current lifecycle status for the stream's backing indices.

The response indicates when the index entered each lifecycle state, provides the definition of the running phase, and includes information about any failures.

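        Illustrative sketch (assumes a connected ``Elasticsearch`` client
        named ``client``; ``my-index`` is a placeholder)::

            resp = client.ilm.explain_lifecycle(index="my-index", only_errors=True)
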
``_ :param index: Comma-separated list of data streams, indices, and aliases to target. Supports wildcards (`*`). To target all data streams and indices, use `*` or `_all`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param only_errors: Filters the returned indices to only indices that are managed by ILM and are in an error state, either due to encountering an error while executing the policy, or attempting to use a policy that does not exist. :param only_managed: Filters the returned indices to only indices that are managed by ILM. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ilm/explain' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if only_errors is not None: __query["only_errors"] = only_errors if only_managed is not None: __query["only_managed"] = only_managed if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ilm.explain_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def get_lifecycle( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get lifecycle policies.

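        For example (assuming a connected ``Elasticsearch`` client named
        ``client``; omit ``name`` to fetch every policy)::

            resp = client.ilm.get_lifecycle(name="my-policy")
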
``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"policy": _quote(name)} __path = f'/_ilm/policy/{__path_parts["policy"]}' else: __path_parts = {} __path = "/_ilm/policy" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ilm.get_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def get_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the ILM status.

Get the current index lifecycle management status.

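        Minimal sketch (assumes a connected ``Elasticsearch`` client named
        ``client``; the reported mode is typically ``RUNNING``, ``STOPPING``,
        or ``STOPPED``)::

            resp = client.ilm.get_status()
            print(resp["operation_mode"])
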
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ilm.get_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("legacy_template_to_delete", "node_attribute"), ) def migrate_to_data_tiers( self, *, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, legacy_template_to_delete: t.Optional[str] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, node_attribute: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Migrate to data tiers routing. Switch the indices, ILM policies, and legacy, composable, and component templates from using custom node attributes and attribute-based allocation filters to using data tiers. Optionally, delete one legacy index template. Using node roles enables ILM to automatically move the indices between data tiers.

Migrating away from custom node attribute routing can be performed manually. This API provides an automated way of performing three out of the four manual steps listed in the migration guide:

  1. Stop setting the custom hot attribute on new indices.
  2. Remove custom allocation settings from existing ILM policies.
  3. Replace custom allocation settings from existing indices with the corresponding tier preference.

ILM must be stopped before performing the migration. Use the stop ILM and get ILM status APIs to wait until the reported operation mode is STOPPED.

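        A cautious first call can use a dry run to preview the migration
        (illustrative sketch; assumes a connected ``Elasticsearch`` client
        named ``client`` and that ILM has already been stopped)::

            resp = client.ilm.migrate_to_data_tiers(dry_run=True)
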
``_ :param dry_run: If true, simulates the migration from node attributes based allocation filters to data tiers, but does not perform the migration. This provides a way to retrieve the indices and ILM policies that need to be migrated. :param legacy_template_to_delete: :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param node_attribute: """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/migrate_to_data_tiers" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if legacy_template_to_delete is not None: __body["legacy_template_to_delete"] = legacy_template_to_delete if node_attribute is not None: __body["node_attribute"] = node_attribute if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ilm.migrate_to_data_tiers", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("current_step", "next_step"), ) def move_to_step( self, *, index: str, current_step: t.Optional[t.Mapping[str, t.Any]] = None, next_step: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Move to a lifecycle step. Manually move an index into a specific step in the lifecycle policy and run that step.

WARNING: This operation can result in the loss of data. Manually moving an index into a specific step runs that step even if it has already been performed. This is a potentially destructive action and it should be considered an expert-level API.

You must specify both the current step and the step to be executed in the body of the request. The request will fail if the current step does not match the step currently running for the index. This is to prevent the index from being moved from an unexpected step into the next step.

When specifying the target (next_step) to which the index will be moved, only the phase field is required; the action and name fields are optional. If only the phase is specified, the index will move to the first step of the first action in the target phase. If the phase and action are specified, the index will move to the first step of the specified action in the specified phase. Only actions specified in the ILM policy are considered valid. An index cannot move to a step that is not part of its policy.

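        Hedged example (assumes a connected ``Elasticsearch`` client named
        ``client``; the index name and the phases shown are placeholders)::

            resp = client.ilm.move_to_step(
                index="my-index",
                current_step={"phase": "new", "action": "complete", "name": "complete"},
                next_step={"phase": "warm"},
            )
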
``_ :param index: The name of the index whose lifecycle step is to change :param current_step: The step that the index is expected to be in. :param next_step: The step that you want to run. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if current_step is None and body is None: raise ValueError("Empty value passed for parameter 'current_step'") if next_step is None and body is None: raise ValueError("Empty value passed for parameter 'next_step'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_ilm/move/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if current_step is not None: __body["current_step"] = current_step if next_step is not None: __body["next_step"] = next_step if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ilm.move_to_step", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("policy",), ) def put_lifecycle( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, policy: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a lifecycle policy. If the specified policy exists, it is replaced and the policy version is incremented.

NOTE: Only the latest version of the policy is stored; you cannot revert to previous versions.

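        Illustrative sketch (assumes a connected ``Elasticsearch`` client
        named ``client``; the policy name and phase definitions are
        placeholders)::

            resp = client.ilm.put_lifecycle(
                name="my-policy",
                policy={
                    "phases": {
                        "hot": {"actions": {"rollover": {"max_age": "7d"}}},
                        "delete": {"min_age": "30d", "actions": {"delete": {}}},
                    }
                },
            )
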
``_ :param name: Identifier for the policy. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param policy: :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"policy": _quote(name)} __path = f'/_ilm/policy/{__path_parts["policy"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if policy is not None: __body["policy"] = policy if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ilm.put_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def remove_policy( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Remove policies from an index. Remove the assigned lifecycle policies from an index or a data stream's backing indices. It also stops managing the indices.

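        For example (assuming a connected ``Elasticsearch`` client named
        ``client``; ``my-index`` is a placeholder)::

            resp = client.ilm.remove_policy(index="my-index")
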
``_ :param index: The name of the index to remove policy on """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ilm/remove' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.remove_policy", path_parts=__path_parts, ) @_rewrite_parameters() def retry( self, *, index: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Retry a policy. Retry running the lifecycle policy for an index that is in the ERROR step. The API sets the policy back to the step where the error occurred and runs the step. Use the explain lifecycle state API to determine whether an index is in the ERROR step.

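        Illustrative sketch (assumes a connected ``Elasticsearch`` client
        named ``client`` and an index currently in the ERROR step)::

            resp = client.ilm.retry(index="my-index")
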
``_ :param index: The name of the indices (comma-separated) whose failed lifecycle step is to be retried """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_ilm/retry' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.retry", path_parts=__path_parts, ) @_rewrite_parameters() def start( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start the ILM plugin. Start the index lifecycle management plugin if it is currently stopped. ILM is started automatically when the cluster is formed. Restarting ILM is necessary only when it has been stopped using the stop ILM API.

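        Minimal sketch (assumes a connected ``Elasticsearch`` client named
        ``client``)::

            resp = client.ilm.start()
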
``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/start" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.start", path_parts=__path_parts, ) @_rewrite_parameters() def stop( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop the ILM plugin. Halt all lifecycle management operations and stop the index lifecycle management plugin. This is useful when you are performing maintenance on the cluster and need to prevent ILM from performing any actions on your indices.

The API returns as soon as the stop request has been acknowledged, but the plugin might continue to run until in-progress operations complete and the plugin can be safely stopped. Use the get ILM status API to check whether ILM is running.

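        Illustrative sketch (assumes a connected ``Elasticsearch`` client
        named ``client``); as noted above, the stop request is acknowledged
        before operations finish, so poll the status afterwards::

            resp = client.ilm.stop()
            # Check whether in-progress operations have finished:
            status = client.ilm.get_status()["operation_mode"]
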
``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_ilm/stop" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ilm.stop", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/indices.py000066400000000000000000011071321506101734100250510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import HeadApiResponse, ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class IndicesClient(NamespacedClient): @_rewrite_parameters() def add_block( self, *, index: str, block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Add an index block.

Add an index block to an index. Index blocks limit the operations allowed on an index by blocking specific operation types.
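
For example, a minimal sketch that blocks write operations on a hypothetical index, assuming a client instance named ``client``::

    client.indices.add_block(index="my-index", block="write")  # "my-index" is a placeholder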

``_ :param index: A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are adding blocks to. To allow the adding of blocks to indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. :param block: The block type to add to the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if block in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'block'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "block": _quote(block), } __path = f'/{__path_parts["index"]}/_block/{__path_parts["block"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="indices.add_block", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analyzer", "attributes", "char_filter", "explain", "field", "filter", "normalizer", "text", "tokenizer", ), ) def analyze( self, *, index: t.Optional[str] = None, analyzer: t.Optional[str] = None, attributes: t.Optional[t.Sequence[str]] = None, char_filter: t.Optional[t.Sequence[t.Union[str, t.Mapping[str, t.Any]]]] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, field: t.Optional[str] = None, filter: t.Optional[t.Sequence[t.Union[str, t.Mapping[str, t.Any]]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, normalizer: t.Optional[str] = None, pretty: t.Optional[bool] = None, text: t.Optional[t.Union[str, t.Sequence[str]]] = None, tokenizer: t.Optional[t.Union[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get tokens from text analysis. The analyze API performs analysis on a text string and returns the resulting tokens.

Generating an excessive amount of tokens may cause a node to run out of memory. The index.analyze.max_token_count setting enables you to limit the number of tokens that can be produced. If more tokens than this limit are generated, an error occurs. The _analyze endpoint without a specified index always uses 10000 as its limit.
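
A minimal sketch, assuming a client instance named ``client`` (the analyzer and text are illustrative)::

    client.indices.analyze(analyzer="standard", text="Quick Brown Foxes!")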

``_ :param index: Index used to derive the analyzer. If specified, the `analyzer` or field parameter overrides this value. If no index is specified or the index does not have a default analyzer, the analyze API uses the standard analyzer. :param analyzer: The name of the analyzer that should be applied to the provided `text`. This could be a built-in analyzer, or an analyzer that’s been configured in the index. :param attributes: Array of token attributes used to filter the output of the `explain` parameter. :param char_filter: Array of character filters used to preprocess characters before the tokenizer. :param explain: If `true`, the response includes token attributes and additional details. :param field: Field used to derive the analyzer. To use this parameter, you must specify an index. If specified, the `analyzer` parameter overrides this value. :param filter: Array of token filters used to apply after the tokenizer. :param normalizer: Normalizer to use to convert text into a single token. :param text: Text to analyze. If an array of strings is provided, it is analyzed as a multi-value field. :param tokenizer: Tokenizer to use to convert text into tokens. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_analyze' else: __path_parts = {} __path = "/_analyze" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analyzer is not None: __body["analyzer"] = analyzer if attributes is not None: __body["attributes"] = attributes if char_filter is not None: __body["char_filter"] = char_filter if explain is not None: __body["explain"] = explain if field is not None: __body["field"] = field if filter is not None: __body["filter"] = filter if normalizer is not None: __body["normalizer"] = normalizer if text is not None: __body["text"] = text if tokenizer is not None: __body["tokenizer"] = tokenizer if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.analyze", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def cancel_migrate_reindex( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel a migration reindex operation.

Cancel a migration reindex attempt for a data stream or index.
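
A minimal sketch, assuming a migration reindex is in flight for a hypothetical data stream::

    client.indices.cancel_migrate_reindex(index="my-data-stream")  # placeholder name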

``_ :param index: The index or data stream name """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_migration/reindex/{__path_parts["index"]}/_cancel' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.cancel_migrate_reindex", path_parts=__path_parts, ) @_rewrite_parameters() def clear_cache( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fielddata: t.Optional[bool] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[bool] = None, request: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the cache. Clear the cache of one or more indices. For data streams, the API clears the caches of the stream's backing indices.

By default, the clear cache API clears all caches. To clear only specific caches, use the fielddata, query, or request parameters. To clear the cache only of specific fields, use the fields parameter.
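
For example, a sketch that clears only the fielddata cache of a single field (index and field names are placeholders)::

    client.indices.clear_cache(index="my-index", fielddata=True, fields="my_field")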

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fielddata: If `true`, clears the fields cache. Use the `fields` parameter to clear the cache of specific fields only. :param fields: Comma-separated list of field names used to limit the `fielddata` parameter. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param query: If `true`, clears the query cache. :param request: If `true`, clears the request cache. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_cache/clear' else: __path_parts = {} __path = "/_cache/clear" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if fielddata is not None: __query["fielddata"] = fielddata if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if query is not None: __query["query"] = query if request is not None: __query["request"] = request __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.clear_cache", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "settings"), ) def clone( self, *, index: str, target: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clone an index. Clone an existing index into a new index. Each original primary shard is cloned into a new primary shard in the new index.

IMPORTANT: Elasticsearch does not apply index templates to the resulting index. The API also does not copy index metadata from the original index. Index metadata includes aliases, index lifecycle management phase definitions, and cross-cluster replication (CCR) follower information. For example, if you clone a CCR follower index, the resulting clone will not be a follower index.

The clone API copies most index settings from the source index to the resulting index, with the exception of index.number_of_replicas and index.auto_expand_replicas. To set the number of replicas in the resulting index, configure these settings in the clone request.

Cloning works as follows:

  • First, it creates a new target index with the same definition as the source index.
  • Then it hard-links segments from the source index into the target index. If the file system does not support hard-linking, all segments are copied into the new index, which is a much more time-consuming process.
  • Finally, it recovers the target index as though it were a closed index which had just been re-opened.

IMPORTANT: Indices can only be cloned if they meet the following requirements:

  • The index must be marked as read-only and have a cluster health status of green.
  • The target index must not exist.
  • The source index must have the same number of primary shards as the target index.
  • The node handling the clone process must have sufficient free disk space to accommodate a second copy of the existing index.

The current write index on a data stream cannot be cloned. To clone the current write index, the data stream must first be rolled over so that a new write index is created; the previous write index can then be cloned.

NOTE: Mappings cannot be specified in the _clone request. The mappings of the source index will be used for the target index.

Monitor the cloning process

The cloning process can be monitored with the cat recovery API, or the cluster health API can be used to wait until all primary shards have been allocated by setting the wait_for_status parameter to yellow.

The _clone API returns as soon as the target index has been added to the cluster state, before any shards have been allocated. At this point, all shards are in the state unassigned. If, for any reason, the target index can't be allocated, its primary shard will remain unassigned until it can be allocated on that node.

Once the primary shard is allocated, it moves to state initializing, and the clone process begins. When the clone operation completes, the shard will become active. At that point, Elasticsearch will try to allocate any replicas and may decide to relocate the primary shard to another node.

Wait for active shards

Because the clone operation creates a new index to clone the shards to, the wait for active shards setting on index creation applies to the clone index action as well.
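
A minimal sketch of the sequence described above, assuming a client instance named ``client`` (index names are placeholders): block writes on the source, then clone it::

    client.indices.add_block(index="my-source-index", block="write")
    client.indices.clone(index="my-source-index", target="my-target-index")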

``_ :param index: Name of the source index to clone. :param target: Name of the target index to create. :param aliases: Aliases for the resulting index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the target index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target": _quote(target), } __path = f'/{__path_parts["index"]}/_clone/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.clone", path_parts=__path_parts, ) @_rewrite_parameters() def close( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Close an index. A closed index is blocked for read or write operations and does not allow all operations that opened indices allow. It is not possible to index documents or to search for documents in a closed index. Closed indices do not have to maintain internal data structures for indexing or searching documents, which results in a smaller overhead on the cluster.

When opening or closing an index, the master node is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened and closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behaviour can be turned off using the ignore_unavailable=true parameter.

By default, you must explicitly name the indices you are opening or closing. To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. This setting can also be changed with the cluster update settings API.

Closed indices consume a significant amount of disk-space which can cause problems in managed environments. Closing indices can be turned off with the cluster settings API by setting cluster.indices.close.enable to false.
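
A minimal sketch (the index name is a placeholder)::

    client.indices.close(index="my-index")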

``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_close' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.close", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "mappings", "settings"), ) def create( self, *, index: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an index. You can use the create index API to add a new index to an Elasticsearch cluster. When creating an index, you can specify the following:

  • Settings for the index.
  • Mappings for fields in the index.
  • Index aliases.

Wait for active shards

By default, index creation will only return a response to the client when the primary copies of each shard have been started, or the request times out. The index creation response will indicate what happened. For example, acknowledged indicates whether the index was successfully created in the cluster, while shards_acknowledged indicates whether the requisite number of shard copies were started for each shard in the index before timing out. Note that it is still possible for either acknowledged or shards_acknowledged to be false, but for the index creation to be successful. These values simply indicate whether the operation completed before the timeout. If acknowledged is false, the request timed out before the cluster state was updated with the newly created index, but it probably will be created sometime soon. If shards_acknowledged is false, then the request timed out before the requisite number of shards were started (by default just the primaries), even if the cluster state was successfully updated to reflect the newly created index (that is to say, acknowledged is true).

You can change the default of only waiting for the primary shards to start through the index setting index.write.wait_for_active_shards. Note that changing this setting will also affect the wait_for_active_shards value on all subsequent write operations.
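
For example, a minimal sketch that creates an index with one shard and a single text field (all names are placeholders)::

    client.indices.create(
        index="my-index",  # placeholder
        settings={"number_of_shards": 1},
        mappings={"properties": {"field1": {"type": "text"}}},
    )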

``_ :param index: Name of the index you wish to create. Index names must meet the following criteria: * Lowercase only * Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, or `#` * Indices prior to 7.0 could contain a colon (`:`), but that has been deprecated and will not be supported in later versions * Cannot start with `-`, `_`, or `+` * Cannot be `.` or `..` * Cannot be longer than 255 bytes (note thtat it is bytes, so multi-byte characters will reach the limit faster) * Names starting with `.` are deprecated, except for hidden indices and internal indices managed by plugins :param aliases: Aliases for the index. :param mappings: Mapping for fields in the index. If specified, this mapping can include: - Field names - Field data types - Mapping parameters :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if mappings is not None: __body["mappings"] = mappings if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.create", path_parts=__path_parts, ) @_rewrite_parameters() def create_data_stream( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a data stream.

You must have a matching index template with data stream enabled.
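
A minimal sketch, assuming a matching index template with data stream enabled already exists for the placeholder name::

    client.indices.create_data_stream(name="logs-myapp-default")  # placeholder name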

``_ :param name: Name of the data stream, which must meet the following criteria: Lowercase only; Cannot include `\\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, `,`, `#`, `:`, or a space character; Cannot start with `-`, `_`, `+`, or `.ds-`; Cannot be `.` or `..`; Cannot be longer than 255 bytes. Multi-byte characters count towards this limit faster. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="indices.create_data_stream", path_parts=__path_parts, ) @_rewrite_parameters( body_name="create_from", ) @_stability_warning(Stability.EXPERIMENTAL) def create_from( self, *, source: str, dest: str, create_from: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an index from a source index.

Copy the mappings and settings from the source index to a destination index while allowing request settings and mappings to override the source values.
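
A minimal sketch (names are placeholders; the empty body is illustrative and may instead carry overrides for settings or mappings)::

    client.indices.create_from(source="my-index", dest="my-new-index", create_from={})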

``_ :param source: The source index or data stream name :param dest: The destination index or data stream name :param create_from: """ if source in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'source'") if dest in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'dest'") if create_from is None and body is None: raise ValueError( "Empty value passed for parameters 'create_from' and 'body', one of them should be set." ) elif create_from is not None and body is not None: raise ValueError("Cannot set both 'create_from' and 'body'") __path_parts: t.Dict[str, str] = { "source": _quote(source), "dest": _quote(dest), } __path = f'/_create_from/{__path_parts["source"]}/{__path_parts["dest"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = create_from if create_from is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.create_from", path_parts=__path_parts, ) @_rewrite_parameters() def data_streams_stats( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream stats.

Get statistics for one or more data streams.
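
A minimal sketch using a wildcard pattern (the pattern is a placeholder)::

    client.indices.data_streams_stats(name="logs-*")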

``_ :param name: Comma-separated list of data streams used to limit the request. Wildcard expressions (`*`) are supported. To target all data streams in a cluster, omit this parameter or use `*`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_stats' else: __path_parts = {} __path = "/_data_stream/_stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.data_streams_stats", path_parts=__path_parts, ) @_rewrite_parameters() def delete( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete indices. Deleting an index deletes its documents, shards, and metadata. It does not delete related Kibana components, such as data views, visualizations, or dashboards.

You cannot delete the current write index of a data stream. To delete the index, you must roll over the data stream so a new write index is created. You can then use the delete index API to delete the previous write index.
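
A minimal sketch (the index name is a placeholder)::

    client.indices.delete(index="my-index")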

``_ :param index: Comma-separated list of indices to delete. You cannot specify index aliases. By default, this parameter does not support wildcards (`*`) or `_all`. To use wildcards or `_all`, set the `action.destructive_requires_name` cluster setting to `false`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete", path_parts=__path_parts, ) @_rewrite_parameters() def delete_alias( self, *, index: t.Union[str, t.Sequence[str]], name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an alias. Removes a data stream or index from an alias.
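
A minimal sketch (index and alias names are placeholders)::

    client.indices.delete_alias(index="my-index", name="my-alias")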

``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). :param name: Comma-separated list of aliases to remove. Supports wildcards (`*`). To remove all aliases, use `*` or `_all`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_alias", path_parts=__path_parts, ) @_rewrite_parameters() def delete_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete data stream lifecycles. Removes the data stream lifecycle from a data stream, so that it is no longer managed by the data stream lifecycle.
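
A minimal sketch (the data stream name is a placeholder)::

    client.indices.delete_data_lifecycle(name="my-data-stream")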

``_ :param name: A comma-separated list of data streams of which the data stream lifecycle will be deleted; use `*` to get all data streams :param expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open) :param master_timeout: Specify timeout for connection to master :param timeout: Explicit timestamp for the document """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def delete_data_stream( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete data streams. Deletes one or more data streams and their backing indices.
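
A minimal sketch (the data stream name is a placeholder)::

    client.indices.delete_data_stream(name="my-data-stream")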

``_ :param name: Comma-separated list of data streams to delete. Wildcard (`*`) expressions are supported. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values,such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_data_stream", path_parts=__path_parts, ) @_rewrite_parameters() def delete_data_stream_options( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete data stream options. Removes the data stream options from a data stream.
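
A minimal sketch (the data stream name is a placeholder)::

    client.indices.delete_data_stream_options(name="my-data-stream")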

``_ :param name: A comma-separated list of data streams of which the data stream options will be deleted; use `*` to get all data streams :param expand_wildcards: Whether wildcard expressions should get expanded to open or closed indices (default: open) :param master_timeout: Specify timeout for connection to master :param timeout: Explicit timestamp for the document """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_options' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_data_stream_options", path_parts=__path_parts, ) @_rewrite_parameters() def delete_index_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an index template. The provided name may contain multiple template names separated by a comma. If multiple template names are specified, there is no wildcard support and the provided names must match existing templates exactly.
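
A minimal sketch (the template name is a placeholder)::

    client.indices.delete_index_template(name="my-index-template")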

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_index_template", path_parts=__path_parts, ) @_rewrite_parameters() def delete_template( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a legacy index template. IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
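
A minimal sketch (the template name is a placeholder)::

    client.indices.delete_template(name="my-legacy-template")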

``_ :param name: The name of the legacy index template to delete. Wildcard (`*`) expressions are supported. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.delete_template", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def disk_usage( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flush: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, run_expensive_tasks: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Analyze the index disk usage. Analyze the disk usage of each field of an index or data stream. This API might not support indices created in previous Elasticsearch versions. The result for a small index can be inaccurate, as some parts of an index might not be analyzed by the API.

NOTE: The total size of fields of the analyzed shards of the index in the response is usually smaller than the index store_size value because some small metadata files are ignored and some parts of data files might not be scanned by the API. Since stored fields are stored together in a compressed format, the sizes of stored fields are also estimates and can be inaccurate. The stored size of the _id field is likely underestimated while the _source field is overestimated.
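
A minimal sketch; note that ``run_expensive_tasks=True`` is required for the analysis to run (the index name is a placeholder)::

    client.indices.disk_usage(index="my-index", run_expensive_tasks=True)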

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. It’s recommended to execute this API with a single index (or the latest backing index of a data stream) as the API consumes resources significantly. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flush: If `true`, the API performs a flush before analysis. If `false`, the response may not include uncommitted data. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. :param run_expensive_tasks: Analyzing field disk usage is resource-intensive. To use the API, this parameter must be set to `true`. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_disk_usage' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flush is not None: __query["flush"] = flush if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if run_expensive_tasks is not None: __query["run_expensive_tasks"] = run_expensive_tasks __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.disk_usage", path_parts=__path_parts, ) @_rewrite_parameters( body_name="config", ) @_stability_warning(Stability.EXPERIMENTAL) def downsample( self, *, index: str, target_index: str, config: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Downsample an index. Aggregate a time series data stream (TSDS) index and store pre-computed statistical summaries (min, max, sum, value_count and avg) for each metric field grouped by a configured time interval. For example, a TSDS index that contains metrics sampled every 10 seconds can be downsampled to an hourly index. All documents within an hour interval are summarized and stored as a single document in the downsample index.

NOTE: Only indices in a time series data stream are supported. Neither field nor document level security can be defined on the source index. The source index must be read-only (index.blocks.write: true).
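
A minimal sketch (names are placeholders; the ``fixed_interval`` body is an assumption about the downsample configuration format)::

    client.indices.downsample(
        index="my-tsds-index",
        target_index="my-downsampled-index",
        config={"fixed_interval": "1h"},  # assumed body format
    )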

``_ :param index: Name of the time series index to downsample. :param target_index: Name of the index to create. :param config: """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target_index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target_index'") if config is None and body is None: raise ValueError( "Empty value passed for parameters 'config' and 'body', one of them should be set." ) elif config is not None and body is not None: raise ValueError("Cannot set both 'config' and 'body'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target_index": _quote(target_index), } __path = f'/{__path_parts["index"]}/_downsample/{__path_parts["target_index"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = config if config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.downsample", path_parts=__path_parts, ) @_rewrite_parameters() def exists( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check indices. Check if one or more indices, index aliases, or data streams exist.
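
A minimal sketch; the HEAD response is truthy when the target exists (the index name is a placeholder)::

    if client.indices.exists(index="my-index"):
        print("index exists")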

``_ :param index: Comma-separated list of data streams, indices, and aliases. Supports wildcards (`*`). :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. :param local: If `true`, the request retrieves information from the local node only. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists", path_parts=__path_parts, ) @_rewrite_parameters() def exists_alias( self, *, name: t.Union[str, t.Sequence[str]], index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check aliases.

Check if one or more data stream or index aliases exist.
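
For example, a sketch with a hypothetical alias name:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.exists_alias(name="my-alias")  # truthy if the alias exists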

``_ :param name: Comma-separated list of aliases to check. Supports wildcards (`*`). :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, requests that include a missing data stream or index in the target indices or data streams return an error. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' elif name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_alias/{__path_parts["name"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists_alias", path_parts=__path_parts, ) @_rewrite_parameters() def exists_index_template( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check index templates.

Check whether index templates exist.
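
A minimal sketch (the template name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.exists_index_template(name="my-template")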

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. :param flat_settings: If true, returns settings in flat format. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists_index_template", path_parts=__path_parts, ) @_rewrite_parameters() def exists_template( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> HeadApiResponse: """ .. raw:: html

Check existence of index templates. Get information about whether index templates exist. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
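
A sketch with a hypothetical legacy template name:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.exists_template(name="my-legacy-template")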

``_ :param name: A comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. :param flat_settings: Indicates whether to use a flat format for the response. :param local: Indicates whether to get information from the local node only. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "HEAD", __path, params=__query, headers=__headers, endpoint_id="indices.exists_template", path_parts=__path_parts, ) @_rewrite_parameters() def explain_data_lifecycle( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the status for a data stream lifecycle. Get information about an index or data stream's current data stream lifecycle status, such as time since index creation, time since rollover, the lifecycle configuration managing the index, or any errors encountered during lifecycle execution.
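
For example (the data stream name is an assumption for illustration):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.explain_data_lifecycle(index="my-data-stream", include_defaults=True)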

``_ :param index: The name of the index to explain :param include_defaults: indicates if the API should return the default values the system uses for the index's lifecycle :param master_timeout: Specify timeout for connection to master """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_lifecycle/explain' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.explain_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def field_usage_stats( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get field usage stats. Get field usage information for each shard and field of an index. Field usage statistics are automatically captured when queries are running on a cluster. A shard-level search request that accesses a given field, even if multiple times during that request, is counted as a single use.

The response body reports the per-shard usage count of the data structures that back the fields in the index. A given request will increment each count by a maximum value of 1, even if the request accesses the same field multiple times.
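
A minimal sketch (index and field names are hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.field_usage_stats(index="my-index", fields="my-field")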

``_ :param index: Comma-separated list or wildcard expression of index names used to limit the request. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fields: Comma-separated list or wildcard expressions of fields to include in the statistics. :param ignore_unavailable: If `true`, missing or closed indices are not included in the response. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_field_usage_stats' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.field_usage_stats", path_parts=__path_parts, ) @_rewrite_parameters() def flush( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_if_ongoing: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Flush data streams or indices. Flushing a data stream or index is the process of making sure that any data that is currently only stored in the transaction log is also permanently stored in the Lucene index. When restarting, Elasticsearch replays any unflushed operations from the transaction log into the Lucene index to bring it back into the state that it was in before the restart. Elasticsearch automatically triggers flushes as needed, using heuristics that trade off the size of the unflushed transaction log against the cost of performing each flush.

After each operation has been flushed it is permanently stored in the Lucene index. This may mean that there is no need to maintain an additional copy of it in the transaction log. The transaction log is made up of multiple files, called generations, and Elasticsearch will delete any generation files when they are no longer needed, freeing up disk space.

It is also possible to trigger a flush on one or more indices using the flush API, although it is rare for users to need to call this API directly. If you call the flush API after indexing some documents then a successful response indicates that Elasticsearch has flushed all the documents that were indexed before the flush API was called.
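
For example, a sketch that flushes a single hypothetical index:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.flush(index="my-index", wait_if_ongoing=True)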

``_ :param index: Comma-separated list of data streams, indices, and aliases to flush. Supports wildcards (`*`). To flush all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param force: If `true`, the request forces a flush even if there are no changes to commit to the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param wait_if_ongoing: If `true`, the flush operation blocks until execution when another flush operation is running. If `false`, Elasticsearch returns an error if you request a flush when another flush operation is running. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_flush' else: __path_parts = {} __path = "/_flush" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if wait_if_ongoing is not None: __query["wait_if_ongoing"] = wait_if_ongoing __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.flush", path_parts=__path_parts, ) @_rewrite_parameters() def forcemerge( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flush: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, max_num_segments: t.Optional[int] = None, only_expunge_deletes: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Force a merge. Perform the force merge operation on the shards of one or more indices. For data streams, the API forces a merge on the shards of the stream's backing indices.

Merging reduces the number of segments in each shard by merging some of them together and also frees up the space used by deleted documents. Merging normally happens automatically, but sometimes it is useful to trigger a merge manually.

WARNING: We recommend force merging only a read-only index (meaning the index is no longer receiving writes). When documents are updated or deleted, the old version is not immediately removed but instead soft-deleted and marked with a "tombstone". These soft-deleted documents are automatically cleaned up during regular segment merges. But force merge can cause very large (greater than 5 GB) segments to be produced, which are not eligible for regular merges. So the number of soft-deleted documents can then grow rapidly, resulting in higher disk usage and worse search performance. If you regularly force merge an index receiving writes, this can also make snapshots more expensive, since the new documents can't be backed up incrementally.

Blocks during a force merge

Calls to this API block until the merge is complete (unless the request contains wait_for_completion=false). If the client connection is lost before completion, the force merge process will continue in the background. Any new requests to force merge the same indices will also block until the ongoing force merge is complete.

Running force merge asynchronously

If the request contains wait_for_completion=false, Elasticsearch performs some preflight checks, launches the request, and returns a task you can use to get the status of the task. However, you cannot cancel this task because the force merge task is not cancelable. Elasticsearch creates a record of this task as a document at _tasks/<task_id>. When you are done with a task, you should delete the task document so Elasticsearch can reclaim the space.

Force merging multiple indices

You can force merge multiple indices with a single request by targeting:

  • One or more data streams that contain multiple backing indices
  • Multiple indices
  • One or more aliases
  • All data streams and indices in a cluster

Each targeted shard is force-merged separately using the force_merge threadpool. By default, each node only has a single force_merge thread, which means that the shards on that node are force-merged one at a time. If you expand the force_merge threadpool on a node, it will force merge its shards in parallel.

Force merge temporarily increases the storage used by the shard being merged: it may require free space of up to triple the shard's size when the max_num_segments parameter is set to 1, because all segments are rewritten into a single new one.

Data streams and time-based indices

Force-merging is useful for managing a data stream's older backing indices and other time-based indices, particularly after a rollover. In these cases, each index only receives indexing traffic for a certain period of time. Once an index receives no more writes, its shards can be force-merged to a single segment. This can be a good idea because single-segment shards can sometimes use simpler and more efficient data structures to perform searches. For example:

POST /.ds-my-data-stream-2099.03.07-000001/_forcemerge?max_num_segments=1
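
The equivalent call with this client might look like the following sketch (reusing the backing index name from the example above; the cluster URL is an assumption):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    client.indices.forcemerge(
        index=".ds-my-data-stream-2099.03.07-000001",
        max_num_segments=1,
    )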
          
``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param flush: Specify whether the index should be flushed after performing the operation (default: true) :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param max_num_segments: The number of segments the index should be merged into (default: dynamic) :param only_expunge_deletes: Specify whether the operation should only expunge deleted documents :param wait_for_completion: Should the request wait until the force merge is completed. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_forcemerge' else: __path_parts = {} __path = "/_forcemerge" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flush is not None: __query["flush"] = flush if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if max_num_segments is not None: __query["max_num_segments"] = max_num_segments if only_expunge_deletes is not None: __query["only_expunge_deletes"] = only_expunge_deletes if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.forcemerge", path_parts=__path_parts, ) @_rewrite_parameters() def get( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, features: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["aliases", "mappings", "settings"]]], t.Union[str, t.Literal["aliases", "mappings", "settings"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index information. Get information about one or more indices. For data streams, the API returns information about the stream’s backing indices.
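
A minimal sketch (the index name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get(index="my-index", features=["settings", "mappings"])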

``_ :param index: Comma-separated list of data streams, indices, and index aliases used to limit the request. Wildcard expressions (*) are supported. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting foo*,bar* returns an error if an index starts with foo but no index starts with bar. :param expand_wildcards: Type of index that wildcard expressions can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as open,hidden. :param features: Return only information on specified index features :param flat_settings: If true, returns settings in flat format. :param ignore_unavailable: If false, requests that target a missing index return an error. :param include_defaults: If true, return all default settings in the response. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if features is not None: __query["features"] = features if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get", path_parts=__path_parts, ) @_rewrite_parameters() def get_alias( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get aliases. Retrieves information for one or more data stream or index aliases.
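
For example (hypothetical index name):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_alias(index="my-index")  # all aliases for my-index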

``_ :param index: Comma-separated list of data streams or indices used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param name: Comma-separated list of aliases to retrieve. Supports wildcards (`*`). To retrieve all aliases, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_alias' elif name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_alias/{__path_parts["name"]}' else: __path_parts = {} __path = "/_alias" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_alias", path_parts=__path_parts, ) @_rewrite_parameters() def get_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream lifecycles.

Get the data stream lifecycle configuration of one or more data streams.
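
A sketch (the data stream name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_data_lifecycle(name="my-data-stream", include_defaults=True)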

``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param include_defaults: If `true`, return all default settings in the response. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def get_data_lifecycle_stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream lifecycle stats. Get statistics about the data streams that are managed by a data stream lifecycle.
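
This endpoint takes no required parameters, so a minimal sketch is simply:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_data_lifecycle_stats()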

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_lifecycle/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_lifecycle_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_data_stream( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data streams.

Get information about one or more data streams.
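
A minimal sketch (the data stream name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_data_stream(name="my-data-stream")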

``_ :param name: Comma-separated list of data stream names used to limit the request. Wildcard (`*`) expressions are supported. If omitted, all data streams are returned. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param include_defaults: If true, returns all relevant default configurations for the index template. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param verbose: Whether the maximum timestamp for each data stream should be calculated and returned. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}' else: __path_parts = {} __path = "/_data_stream" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_stream", path_parts=__path_parts, ) @_rewrite_parameters() def get_data_stream_options( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream options.

Get the data stream options configuration of one or more data streams.
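
For example (hypothetical data stream name):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_data_stream_options(name="my-data-stream")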

``_ :param name: Comma-separated list of data streams to limit the request. Supports wildcards (`*`). To target all data streams, omit this parameter or use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_options' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_stream_options", path_parts=__path_parts, ) @_rewrite_parameters() def get_data_stream_settings( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data stream settings.

Get setting information for one or more data streams.
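
A sketch (the data stream name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_data_stream_settings(name="my-data-stream")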

``_ :param name: A comma-separated list of data streams or data stream patterns. Supports wildcards (`*`). :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_settings' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_data_stream_settings", path_parts=__path_parts, ) @_rewrite_parameters() def get_field_mapping( self, *, fields: t.Union[str, t.Sequence[str]], index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get mapping definitions. Retrieves mapping definitions for one or more fields. For data streams, the API retrieves field mappings for the stream’s backing indices.

This API is useful if you don't need a complete mapping or if an index mapping contains a large number of fields.
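
For example, a sketch retrieving the mapping of a single hypothetical field:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_field_mapping(fields="user.id", index="my-index")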

``_ :param fields: Comma-separated list or wildcard expression of fields used to limit returned information. Supports wildcards (`*`). :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. """ if fields in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'fields'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and fields not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "fields": _quote(fields)} __path = f'/{__path_parts["index"]}/_mapping/field/{__path_parts["fields"]}' elif fields not in SKIP_IN_PATH: __path_parts = {"fields": _quote(fields)} __path = f'/_mapping/field/{__path_parts["fields"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_field_mapping", path_parts=__path_parts, ) @_rewrite_parameters() def get_index_template( self, *, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index templates. Get information about one or more index templates.
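
A minimal sketch (the template name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_index_template(name="my-template")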

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (*) expressions are supported. :param flat_settings: If true, returns settings in flat format. :param include_defaults: If true, returns all relevant default configurations for the index template. :param local: If true, the request retrieves information from the local node only. Defaults to false, which means information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' else: __path_parts = {} __path = "/_index_template" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_index_template", path_parts=__path_parts, ) @_rewrite_parameters() def get_mapping( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get mapping definitions. For data streams, the API retrieves mappings for the stream’s backing indices.
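
For example (hypothetical index name):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_mapping(index="my-index")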

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param local: If `true`, the request retrieves information from the local node only. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mapping' else: __path_parts = {} __path = "/_mapping" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_mapping", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_migrate_reindex_status( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the migration reindexing status.

Get the status of a migration reindex attempt for a data stream or index.
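
A sketch (the data stream name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_migrate_reindex_status(index="my-data-stream")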

``_ :param index: The index or data stream name. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/_migration/reindex/{__path_parts["index"]}/_status' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_migrate_reindex_status", path_parts=__path_parts, ) @_rewrite_parameters() def get_settings( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index settings. Get setting information for one or more indices. For data streams, it returns setting information for the stream's backing indices.
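
A minimal sketch (the index name is hypothetical):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_settings(index="my-index", flat_settings=True)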

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param name: Comma-separated list or wildcard expression of settings to retrieve. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with foo but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param include_defaults: If `true`, return all default settings in the response. :param local: If `true`, the request retrieves information from the local node only. If `false`, information is retrieved from the master node. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_settings/{__path_parts["name"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_settings' elif name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_settings/{__path_parts["name"]}' else: __path_parts = {} __path = "/_settings" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_defaults is not None: __query["include_defaults"] = include_defaults if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_settings", path_parts=__path_parts, ) @_rewrite_parameters() def get_template( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get legacy index templates. Get information about one or more index templates.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.
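
A sketch with a hypothetical legacy template name:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.get_template(name="my-legacy-template")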

``_ :param name: Comma-separated list of index template names used to limit the request. Wildcard (`*`) expressions are supported. To return all index templates, omit this parameter or use a value of `_all` or `*`. :param flat_settings: If `true`, returns settings in flat format. :param local: If `true`, the request retrieves information from the local node only. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' else: __path_parts = {} __path = "/_template" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.get_template", path_parts=__path_parts, ) @_rewrite_parameters( body_name="reindex", ) @_stability_warning(Stability.EXPERIMENTAL) def migrate_reindex( self, *, reindex: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reindex legacy backing indices.

Reindex all legacy backing indices for a data stream. This operation occurs in a persistent task. The persistent task ID is returned immediately and the reindexing work is completed in that task.
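
A sketch; the request body shape here is illustrative rather than authoritative, and the data stream name is hypothetical:

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.migrate_reindex(
        reindex={"mode": "upgrade", "source": {"index": "my-data-stream"}}  # illustrative body
    )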

``_ :param reindex: """ if reindex is None and body is None: raise ValueError( "Empty value passed for parameters 'reindex' and 'body', one of them should be set." ) elif reindex is not None and body is not None: raise ValueError("Cannot set both 'reindex' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_migration/reindex" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = reindex if reindex is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.migrate_reindex", path_parts=__path_parts, ) @_rewrite_parameters() def migrate_to_data_stream( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Convert an index alias to a data stream. You must have a matching index template that is data stream enabled. The alias must meet the following criteria:

  • The alias must have a write index.
  • All indices for the alias must have a @timestamp field mapping of a date or date_nanos field type.
  • The alias must not have any filters.
  • The alias must not use custom routing.

If successful, the request removes the alias and creates a data stream with the same name. The indices for the alias become hidden backing indices for the stream. The write index for the alias becomes the write index for the stream.
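
For example (hypothetical alias name):

    from elasticsearch import Elasticsearch

    client = Elasticsearch("http://localhost:9200")  # assumed local cluster
    resp = client.indices.migrate_to_data_stream(name="my-time-series-alias")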

``_ :param name: Name of the index alias to convert to a data stream. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/_migrate/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.migrate_to_data_stream", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("actions",), ) def modify_data_stream( self, *, actions: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data streams. Performs one or more data stream modification actions in a single atomic operation.
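
For example, a sketch that atomically swaps one backing index for another, reusing the `client` from the sketch above (the stream and index names are hypothetical):

client.indices.modify_data_stream(
    actions=[
        # Both actions are applied in a single atomic operation.
        {"remove_backing_index": {"data_stream": "my-logs", "index": ".ds-my-logs-2025.01.01-000001"}},
        {"add_backing_index": {"data_stream": "my-logs", "index": "my-rebuilt-index"}},
    ]
)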

``_ :param actions: Actions to perform. """ if actions is None and body is None: raise ValueError("Empty value passed for parameter 'actions'") __path_parts: t.Dict[str, str] = {} __path = "/_data_stream/_modify" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if actions is not None: __body["actions"] = actions __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.modify_data_stream", path_parts=__path_parts, ) @_rewrite_parameters() def open( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Open a closed index. For data streams, the API opens any closed backing indices.

A closed index is blocked for read and write operations and does not allow all operations that open indices allow. It is not possible to index documents or to search for documents in a closed index. Because closed indices do not have to maintain internal data structures for indexing or searching documents, they impose a smaller overhead on the cluster.

When opening or closing an index, the master is responsible for restarting the index shards to reflect the new state of the index. The shards will then go through the normal recovery process. The data of opened or closed indices is automatically replicated by the cluster to ensure that enough shard copies are safely kept around at all times.

You can open and close multiple indices. An error is thrown if the request explicitly refers to a missing index. This behavior can be turned off by using the ignore_unavailable=true parameter.

By default, you must explicitly name the indices you are opening or closing. To open or close indices with _all, *, or other wildcard expressions, change the action.destructive_requires_name setting to false. This setting can also be changed with the cluster update settings API.

Closed indices consume a significant amount of disk space, which can cause problems in managed environments. Closing indices can be disabled with the cluster settings API by setting cluster.indices.close.enable to false.

Because opening or closing an index allocates its shards, the wait_for_active_shards setting on index creation applies to the _open and _close index actions as well.
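
A minimal sketch of opening a closed index with this client, reusing the `client` from above (the index name is hypothetical):

client.indices.open(
    index="my-index",
    wait_for_active_shards="all",  # block until every shard copy is active
    timeout="30s",
)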

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). By default, you must explicitly name the indices you are using to limit the request. To limit a request using `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to false. You can update this setting in the `elasticsearch.yml` file or using the cluster update settings API. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_open' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.open", path_parts=__path_parts, ) @_rewrite_parameters() def promote_data_stream( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Promote a data stream. Promote a replicated data stream managed by cross-cluster replication (CCR) to a regular data stream.

With CCR auto following, a data stream from a remote cluster can be replicated to the local cluster. These replicated data streams can't be rolled over in the local cluster; they roll over only when the upstream data stream rolls over. If the remote cluster becomes unavailable, the data stream in the local cluster can be promoted to a regular data stream, which can then be rolled over in the local cluster.

NOTE: When promoting a data stream, ensure the local cluster has a data stream enabled index template that matches the data stream. If this is missing, the data stream will not be able to roll over until a matching index template is created. This will affect the lifecycle management of the data stream and interfere with the data stream size and retention.
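
A minimal sketch, assuming a local replicated data stream named "my-replicated-logs" and the `client` from above:

# Promote the follower data stream so it can be rolled over locally
client.indices.promote_data_stream(name="my-replicated-logs")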

``_ :param name: The name of the data stream :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/_promote/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.promote_data_stream", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "filter", "index_routing", "is_write_index", "routing", "search_routing", ), ) def put_alias( self, *, index: t.Union[str, t.Sequence[str]], name: str, error_trace: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_routing: t.Optional[str] = None, is_write_index: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, routing: t.Optional[str] = None, search_routing: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an alias. Adds a data stream or index to an alias.
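
For example, a sketch that adds an index to an alias, makes it the write index, and attaches a filter (all names and the query are hypothetical):

client.indices.put_alias(
    index="my-index-000001",
    name="my-alias",
    is_write_index=True,                     # route write requests to this index
    filter={"term": {"user.id": "kimchy"}},  # limit documents the alias can access
)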

``_ :param index: Comma-separated list of data streams or indices to add. Supports wildcards (`*`). Wildcard patterns that match both data streams and indices return an error. :param name: Alias to update. If the alias doesn’t exist, the request creates it. Index alias names support date math. :param filter: Query used to limit documents the alias can access. :param index_routing: Value used to route indexing operations to a specific shard. If specified, this overwrites the `routing` value for indexing operations. Data stream aliases don’t support this parameter. :param is_write_index: If `true`, sets the write index or data stream for the alias. If an alias points to multiple indices or data streams and `is_write_index` isn’t set, the alias rejects write requests. If an index alias points to one index and `is_write_index` isn’t set, the index automatically acts as the write index. Data stream aliases don’t automatically set a write data stream, even if the alias points to one data stream. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param routing: Value used to route indexing and search operations to a specific shard. Data stream aliases don’t support this parameter. :param search_routing: Value used to route search operations to a specific shard. If specified, this overwrites the `routing` value for search operations. Data stream aliases don’t support this parameter. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"index": _quote(index), "name": _quote(name)} __path = f'/{__path_parts["index"]}/_alias/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if filter is not None: __body["filter"] = filter if index_routing is not None: __body["index_routing"] = index_routing if is_write_index is not None: __body["is_write_index"] = is_write_index if routing is not None: __body["routing"] = routing if search_routing is not None: __body["search_routing"] = search_routing if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_alias", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("data_retention", "downsampling", "enabled"), ) def put_data_lifecycle( self, *, name: t.Union[str, t.Sequence[str]], data_retention: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, downsampling: t.Optional[t.Mapping[str, t.Any]] = None, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", 
"closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data stream lifecycles. Update the data stream lifecycle of the specified data streams.
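
A minimal sketch, assuming a data stream named "my-logs":

client.indices.put_data_lifecycle(
    name="my-logs",
    data_retention="7d",  # keep documents for at least seven days
)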

``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. :param data_retention: If defined, every document added to this data stream will be stored at least for this time frame. Any time after this duration the document could be deleted. When empty, every document in this data stream will be stored indefinitely. :param downsampling: The downsampling configuration to execute for the managed backing index after rollover. :param enabled: If defined, it turns data stream lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (enabled: `false`) will have no effect on the data stream. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_lifecycle' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if data_retention is not None: __body["data_retention"] = data_retention if downsampling is not None: __body["downsampling"] = downsampling if enabled is not None: __body["enabled"] = enabled if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_data_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("failure_store",), ) def put_data_stream_options( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, failure_store: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data stream options. Update the data stream options of the specified data streams.
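
A minimal sketch; the body below follows the shape implied by the `failure_store` parameter and should be treated as an assumption:

client.indices.put_data_stream_options(
    name="my-logs",
    failure_store={"enabled": True},  # assumed option shape; sketch only
)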

``_ :param name: Comma-separated list of data streams used to limit the request. Supports wildcards (`*`). To target all data streams use `*` or `_all`. :param expand_wildcards: Type of data stream that wildcard patterns can match. Supports comma-separated values, such as `open,hidden`. :param failure_store: If defined, it will update the failure store configuration of every data stream resolved by the name expression. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_options' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if failure_store is not None: __body["failure_store"] = failure_store if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_data_stream_options", path_parts=__path_parts, ) @_rewrite_parameters( body_name="settings", ) def put_data_stream_settings( self, *, name: t.Union[str, t.Sequence[str]], settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update data stream settings.

This API can be used to override settings on specific data streams. These overrides will take precedence over what is specified in the template that the data stream matches. To prevent your data stream from getting into an invalid state, only certain settings are allowed. If possible, the setting change is applied to all backing indices. Otherwise, it will be applied when the data stream is next rolled over.
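
A minimal sketch; the setting shown is hypothetical and must be one of the settings this API allows:

client.indices.put_data_stream_settings(
    name="my-logs",
    settings={"index.number_of_replicas": 1},
    dry_run=True,  # simulate the change and report what would happen
)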

``_ :param name: A comma-separated list of data streams or data stream patterns. :param settings: :param dry_run: If `true`, the request does not actually change the settings on any data streams or indices. Instead, it simulates changing the settings and reports back to the user what would have happened had these settings actually been applied. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if settings is None and body is None: raise ValueError( "Empty value passed for parameters 'settings' and 'body', one of them should be set." ) elif settings is not None and body is not None: raise ValueError("Cannot set both 'settings' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_data_stream/{__path_parts["name"]}/_settings' __query: t.Dict[str, t.Any] = {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = settings if settings is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_data_stream_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_auto_create", "composed_of", "data_stream", "deprecated", "ignore_missing_component_templates", "index_patterns", "meta", "priority", "template", "version", ), parameter_aliases={"_meta": "meta"}, ) def put_index_template( self, *, name: str, allow_auto_create: t.Optional[bool] = None, cause: t.Optional[str] = None, composed_of: t.Optional[t.Sequence[str]] = None, create: t.Optional[bool] = None, data_stream: t.Optional[t.Mapping[str, t.Any]] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_missing_component_templates: t.Optional[t.Sequence[str]] = None, index_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, priority: t.Optional[int] = None, template: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices.

Elasticsearch applies templates to new indices based on a wildcard pattern that matches the index name. Index templates are applied during data stream or index creation. For data streams, these settings and mappings are applied when the stream's backing indices are created. Settings and mappings specified in a create index API request override any settings or mappings specified in an index template. Changes to index templates do not affect existing indices, including the existing backing indices of a data stream.

You can use C-style /* */ block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket.

Multiple matching templates

If multiple index templates match the name of a new index or data stream, the template with the highest priority is used.

Multiple templates with overlapping index patterns at the same priority are not allowed; attempting to create a template that matches an existing template at the same priority returns an error.

Composing aliases, mappings, and settings

When multiple component templates are specified in the composed_of field for an index template, they are merged in the order specified, meaning that later component templates override earlier component templates. Any mappings, settings, or aliases from the parent index template are merged in next. Finally, any configuration on the index request itself is merged. Mapping definitions are merged recursively, which means that later mapping components can introduce new field mappings and update the mapping configuration. If a field mapping is already contained in an earlier component, its definition will be completely overwritten by the later one. This recursive merging strategy applies not only to field mappings, but also root options like dynamic_templates and meta. If an earlier component contains a dynamic_templates block, then by default new dynamic_templates entries are appended onto the end. If an entry already exists with the same key, then it is overwritten by the new definition.
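
Putting the pieces together, a sketch of creating a data stream template (the template and component names are hypothetical):

client.indices.put_index_template(
    name="my-logs-template",
    index_patterns=["my-logs-*"],
    data_stream={},                              # matching names become data streams
    composed_of=["my-mappings", "my-settings"],  # merged in order; later templates win
    priority=500,                                # beats lower-priority matching templates
    template={"settings": {"number_of_replicas": 1}},
)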

``_ :param name: Index or template name :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `action.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. :param cause: User defined reason for creating/updating the index template :param composed_of: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. :param create: If `true`, this request cannot replace or update existing index templates. :param data_stream: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. :param deprecated: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. :param ignore_missing_component_templates: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist. :param index_patterns: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional user metadata about the index template. It may have any contents. It is not automatically generated or used by Elasticsearch. This user-defined object is stored in the cluster state, so keeping it short is preferable. To unset the metadata, replace the template without specifying it. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. External systems can use these version numbers to simplify template management. To unset a version, replace the template without specifying one. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if allow_auto_create is not None: __body["allow_auto_create"] = allow_auto_create if composed_of is not None: __body["composed_of"] = composed_of if data_stream is not None: __body["data_stream"] = data_stream if deprecated is not None: __body["deprecated"] = deprecated if ignore_missing_component_templates is not None: __body["ignore_missing_component_templates"] = ( ignore_missing_component_templates ) if index_patterns is not None: __body["index_patterns"] = index_patterns if meta is not None: __body["_meta"] = meta if priority is not None: __body["priority"] = priority if template is not None: __body["template"] = template if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_index_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "date_detection", "dynamic", "dynamic_date_formats", "dynamic_templates", "field_names", "meta", "numeric_detection", "properties", "routing", "runtime", "source", ), parameter_aliases={ "_field_names": "field_names", "_meta": "meta", "_routing": "routing", "_source": "source", }, ) def put_mapping( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, date_detection: t.Optional[bool] = None, dynamic: t.Optional[ t.Union[str, t.Literal["false", "runtime", "strict", "true"]] ] = None, dynamic_date_formats: t.Optional[t.Sequence[str]] = None, dynamic_templates: t.Optional[ t.Sequence[t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, field_names: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, numeric_detection: t.Optional[bool] = None, pretty: t.Optional[bool] = None, properties: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, routing: t.Optional[t.Mapping[str, t.Any]] = None, runtime: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, write_index_only: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update field mappings. Add new fields to an existing data stream or index. You can use the update mapping API to:

  • Add a new field to an existing index
  • Update mappings for multiple indices in a single request
  • Add new properties to an object field
  • Enable multi-fields for an existing field
  • Update supported mapping parameters
  • Change a field's mapping using reindexing
  • Rename a field using a field alias

Learn how to use the update mapping API with practical examples in the Update mapping API examples guide.
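
For example, a sketch that adds a new keyword field to an existing index (the names are hypothetical):

client.indices.put_mapping(
    index="my-index",
    properties={
        "user_id": {"type": "keyword"},  # new field added to the existing mapping
    },
)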

``_ :param index: A comma-separated list of index names the mapping should be added to (supports wildcards); use `_all` or omit to add the mapping on all indices. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param date_detection: Controls whether dynamic date detection is enabled. :param dynamic: Controls whether new fields are added dynamically. :param dynamic_date_formats: If date detection is enabled then new string fields are checked against 'dynamic_date_formats' and if the value matches then a new date field is added instead of string. :param dynamic_templates: Specify dynamic templates for the mapping. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param field_names: Control whether field names are enabled for the index. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: A mapping type can have custom meta data associated with it. These are not used at all by Elasticsearch, but can be used to store application-specific metadata. :param numeric_detection: Automatically map strings into numeric data types for all fields. :param properties: Mapping for a field. For new fields, this mapping can include: - Field name - Field data type - Mapping parameters :param routing: Enable making a routing value required on indexed documents. :param runtime: Mapping of runtime fields for the index. :param source: Control whether the _source field is enabled on the index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param write_index_only: If `true`, the mappings are applied only to the current write index for the target. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_mapping' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if write_index_only is not None: __query["write_index_only"] = write_index_only if not __body: if date_detection is not None: __body["date_detection"] = date_detection if dynamic is not None: __body["dynamic"] = dynamic if dynamic_date_formats is not None: __body["dynamic_date_formats"] = dynamic_date_formats if dynamic_templates is not None: __body["dynamic_templates"] = dynamic_templates if field_names is not None: __body["_field_names"] = field_names if meta is not None: __body["_meta"] = meta if numeric_detection is not None: __body["numeric_detection"] = numeric_detection if properties is not None: __body["properties"] = properties if routing is not None: __body["_routing"] = routing if runtime is not None: __body["runtime"] = runtime if source is not None: __body["_source"] = source __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_mapping", path_parts=__path_parts, ) @_rewrite_parameters( body_name="settings", ) def put_settings( self, *, settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, preserve_existing: t.Optional[bool] = None, pretty: t.Optional[bool] = None, reopen: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update index settings. Changes dynamic index settings in real time. For data streams, index setting changes are applied to all backing indices by default.

To revert a setting to the default value, use a null value. The list of per-index settings that can be updated dynamically on live indices can be found in index settings documentation. To preserve existing settings from being updated, set the preserve_existing parameter to true.

For performance optimization during bulk indexing, you can disable the refresh interval. Refer to disable refresh interval for an example. There are multiple valid ways to represent index settings in the request body. You can specify only the setting, for example:

{
  "number_of_replicas": 1
}

Or you can use an index setting object:

{
  "index": {
    "number_of_replicas": 1
  }
}

Or you can use dot notation:

{
  "index.number_of_replicas": 1
}

Or you can embed any of the aforementioned options in a settings object. For example:

{
  "settings": {
    "index": {
      "number_of_replicas": 1
    }
  }
}

NOTE: You can only define new analyzers on closed indices. To add an analyzer, you must close the index, define the analyzer, and reopen the index. You cannot close the write index of a data stream. To update the analyzer for a data stream's write index and future backing indices, update the analyzer in the index template used by the stream. Then roll over the data stream to apply the new analyzer to the stream's write index and future backing indices. This affects searches and any new data added to the stream after the rollover. However, it does not affect the data stream's backing indices or their existing data. To change the analyzer for existing backing indices, you must create a new data stream and reindex your data into it. Refer to updating analyzers on existing indices for step-by-step examples.
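
A minimal sketch of the same setting change shown above, issued through this client (the index name is hypothetical):

client.indices.put_settings(
    index="my-index",
    settings={"index": {"number_of_replicas": 1}},  # any of the body shapes above works here
)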

``_ :param settings: :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param flat_settings: If `true`, returns settings in flat format. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param preserve_existing: If `true`, existing index settings remain unchanged. :param reopen: Whether to close and reopen the index to apply non-dynamic settings. If set to `true`, the indices to which the settings are being applied will be closed temporarily and then reopened in order to apply the changes. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if settings is None and body is None: raise ValueError( "Empty value passed for parameters 'settings' and 'body', one of them should be set." ) elif settings is not None and body is not None: raise ValueError("Cannot set both 'settings' and 'body'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_settings' else: __path_parts = {} __path = "/_settings" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if preserve_existing is not None: __query["preserve_existing"] = preserve_existing if pretty is not None: __query["pretty"] = pretty if reopen is not None: __query["reopen"] = reopen if timeout is not None: __query["timeout"] = timeout __body = settings if settings is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aliases", "index_patterns", "mappings", "order", "settings", "version", ), ) def put_template( self, *, name: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, cause: t.Optional[str] = None, create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, 
t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, order: t.Optional[int] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a legacy index template. Index templates define settings, mappings, and aliases that can be applied automatically to new indices. Elasticsearch applies templates to new indices based on an index pattern that matches the index name.

IMPORTANT: This documentation is about legacy index templates, which are deprecated and will be replaced by the composable templates introduced in Elasticsearch 7.8.

Composable templates always take precedence over legacy templates. If no composable template matches a new index, matching legacy templates are applied according to their order.

Index templates are only applied during index creation. Changes to index templates do not affect existing indices. Settings and mappings specified in create index API requests override any settings or mappings specified in an index template.

You can use C-style /* */ block comments in index templates. You can include comments anywhere in the request body, except before the opening curly bracket.

Indices matching multiple templates

Multiple index templates can potentially match an index; in this case, both the settings and mappings are merged into the final configuration of the index. The order of the merging can be controlled using the order parameter, with lower order being applied first and higher orders overriding them. NOTE: Multiple matching templates with the same order value will result in a non-deterministic merging order.
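
A sketch of creating a legacy template with this deprecated API (the names are hypothetical):

client.indices.put_template(
    name="my-legacy-template",
    index_patterns=["my-old-*"],
    order=1,  # merged after, and overriding, templates with lower order values
    settings={"number_of_shards": 1},
)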

``_ :param name: The name of the template :param aliases: Aliases for the index. :param cause: User defined reason for creating/updating the index template :param create: If true, this request cannot replace or update existing index templates. :param index_patterns: Array of wildcard expressions used to match the names of indices during creation. :param mappings: Mapping for fields in the index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param order: Order in which Elasticsearch applies this template if index matches multiple templates. Templates with lower 'order' values are merged first. Templates with higher 'order' values are merged later, overriding templates with lower values. :param settings: Configuration options for the index. :param version: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. To unset a version, replace the template without specifying one. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_template/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if aliases is not None: __body["aliases"] = aliases if index_patterns is not None: __body["index_patterns"] = index_patterns if mappings is not None: __body["mappings"] = mappings if order is not None: __body["order"] = order if settings is not None: __body["settings"] = settings if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.put_template", path_parts=__path_parts, ) @_rewrite_parameters() def recovery( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, active_only: t.Optional[bool] = None, allow_no_indices: t.Optional[bool] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index recovery information. Get information about ongoing and completed shard recoveries for one or more indices. For data streams, the API returns information for the stream's backing indices.

All recoveries, whether ongoing or complete, are kept in the cluster state and may be reported on at any time.

Shard recovery is the process of initializing a shard copy, such as restoring a primary shard from a snapshot or creating a replica shard from a primary shard. When a shard recovery completes, the recovered shard is available for search and indexing.

Recovery automatically occurs during the following processes:

  • When creating an index for the first time.
  • When a node rejoins the cluster and starts up any missing primary shard copies using the data that it holds in its data path.
  • Creation of new replica shard copies from the primary.
  • Relocation of a shard copy to a different node in the same cluster.
  • A snapshot restore operation.
  • A clone, shrink, or split operation.

You can determine the cause of a shard recovery using the recovery or cat recovery APIs.

The index recovery API reports information about completed recoveries only for shard copies that currently exist in the cluster. It only reports the last recovery for each shard copy and does not report historical information about earlier recoveries, nor does it report information about the recoveries of shard copies that no longer exist. This means that if a shard copy completes a recovery and Elasticsearch then relocates it onto a different node, the information about the original recovery will not be shown in the recovery API.
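
A minimal sketch that asks only for ongoing recoveries with per-file details (the index name is hypothetical):

client.indices.recovery(
    index="my-index",
    active_only=True,  # omit completed recoveries
    detailed=True,     # include detailed shard recovery information
)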

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param active_only: If `true`, the response only includes ongoing shard recoveries. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param detailed: If `true`, the response includes detailed information about shard recoveries. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_recovery' else: __path_parts = {} __path = "/_recovery" __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.recovery", path_parts=__path_parts, ) @_rewrite_parameters() def refresh( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Refresh an index. A refresh makes recent operations performed on one or more indices available for search. For data streams, the API runs the refresh operation on the stream’s backing indices.

By default, Elasticsearch periodically refreshes indices every second, but only on indices that have received one search request or more in the last 30 seconds. You can change this default interval with the index.refresh_interval setting.

Refresh requests are synchronous and do not return a response until the refresh operation completes.

Refreshes are resource-intensive. To ensure good cluster performance, it's recommended to wait for Elasticsearch's periodic refresh rather than performing an explicit refresh when possible.

If your application workflow indexes documents and then runs a search to retrieve the indexed document, it's recommended to use the index API's refresh=wait_for query parameter option. This option ensures the indexing operation waits for a periodic refresh before running the search.
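
Two sketches of the guidance above (the index name and document are hypothetical): an explicit refresh, and the usually preferable `refresh="wait_for"` on the indexing call itself:

client.indices.refresh(index="my-index")

# Preferred when you only need your own write to become searchable:
client.index(index="my-index", document={"message": "hello"}, refresh="wait_for")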

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_refresh' else: __path_parts = {} __path = "/_refresh" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.refresh", path_parts=__path_parts, ) @_rewrite_parameters() def reload_search_analyzers( self, *, index: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, resource: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reload search analyzers. Reload an index's search analyzers and their resources. For data streams, the API reloads search analyzers and resources for the stream's backing indices.

IMPORTANT: After reloading the search analyzers you should clear the request cache to make sure it doesn't contain responses derived from the previous versions of the analyzer.

You can use the reload search analyzers API to pick up changes to synonym files used in the synonym_graph or synonym token filter of a search analyzer. To be eligible, the token filter must have an updateable flag of true and only be used in search analyzers.

NOTE: This API does not perform a reload for each shard of an index. Instead, it performs a reload for each node containing index shards. As a result, the total shard count returned by the API can differ from the number of index shards. Because reloading affects every node with an index shard, it is important to update the synonym file on every data node in the cluster--including nodes that don't contain a shard replica--before using this API. This ensures the synonym file is updated everywhere in the cluster in case shards are relocated in the future.
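
A sketch of reloading analyzers and then clearing the request cache, per the note above (the index name is hypothetical):

client.indices.reload_search_analyzers(index="my-index")
client.indices.clear_cache(index="my-index", request=True)  # drop responses built with the old analyzer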

``_ :param index: A comma-separated list of index names to reload analyzers for :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :param resource: Changed resource to reload analyzers from if applicable """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_reload_search_analyzers' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if resource is not None: __query["resource"] = resource __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.reload_search_analyzers", path_parts=__path_parts, ) @_rewrite_parameters() def remove_block( self, *, index: str, block: t.Union[str, t.Literal["metadata", "read", "read_only", "write"]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Remove an index block.

Remove an index block from an index. Index blocks limit the operations allowed on an index by blocking specific operation types.
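A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index name is illustrative)::

    # Lift a write block that was added, for example, before a shrink or split.
    client.indices.remove_block(index="my-index", block="write")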

``_ :param index: A comma-separated list or wildcard expression of index names used to limit the request. By default, you must explicitly name the indices you are removing blocks from. To allow the removal of blocks from indices with `_all`, `*`, or other wildcard expressions, change the `action.destructive_requires_name` setting to `false`. You can update this setting in the `elasticsearch.yml` file or by using the cluster update settings API. :param block: The block type to remove from the index. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: The type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. It supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. It can also be set to `-1` to indicate that the request should never timeout. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. It can also be set to `-1` to indicate that the request should never timeout. 
""" if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if block in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'block'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "block": _quote(block), } __path = f'/{__path_parts["index"]}/_block/{__path_parts["block"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="indices.remove_block", path_parts=__path_parts, ) @_rewrite_parameters() def resolve_cluster( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resolve the cluster.

Resolve the specified index expressions to return information about each cluster, including the local "querying" cluster, if included. If no index expression is provided, the API will return information about all the remote clusters that are configured on the querying cluster.

This endpoint is useful before doing a cross-cluster search in order to determine which remote clusters should be included in a search.

You use the same index expression with this endpoint as you would for cross-cluster search. Index and cluster exclusions are also supported with this endpoint.

For each cluster in the index expression, information is returned about:

  • Whether the querying ("local") cluster is currently connected to each remote cluster specified in the index expression. Note that this endpoint actively attempts to contact the remote clusters, unlike the remote/info endpoint.
  • Whether each remote cluster is configured with skip_unavailable as true or false.
  • Whether there are any indices, aliases, or data streams on that cluster that match the index expression.
  • Whether the search is likely to have errors returned when you do the cross-cluster search (including any authorization errors if you do not have permission to query the index).
  • Cluster version information, including the Elasticsearch server version.

For example, GET /_resolve/cluster/my-index-*,cluster*:my-index-* returns information about the local cluster and all remotely configured clusters that start with the alias cluster*. Each cluster returns information about whether it has any indices, aliases or data streams that match my-index-*.

Note on backwards compatibility

The ability to query without an index expression was added in version 8.18, so when querying remote clusters older than that, the local cluster will send the index expression dummy* to those remote clusters. Thus, if an error occurs, you may see a reference to that index expression even though you didn't request it. If it causes a problem, you can instead include an index expression like *:* to bypass the issue.

Advantages of using this endpoint before a cross-cluster search

You may want to exclude a cluster or index from a search when:

  • A remote cluster is not currently connected and is configured with skip_unavailable=false. Running a cross-cluster search under those conditions will cause the entire search to fail.
  • A cluster has no matching indices, aliases or data streams for the index expression (or your user does not have permissions to search them). For example, suppose your index expression is logs*,remote1:logs* and the remote1 cluster has no indices, aliases or data streams that match logs*. In that case, that cluster will return no results if you include it in a cross-cluster search.
  • The index expression (combined with any query parameters you specify) will likely cause an exception to be thrown when you do the search. In these cases, the "error" field in the _resolve/cluster response will be present. (This is also where security/permission errors will be shown.)
  • A remote cluster is an older version that does not support the feature you want to use in your search.

Test availability of remote clusters

The remote/info endpoint is commonly used to test whether the "local" cluster (the cluster being queried) is connected to its remote clusters, but it does not necessarily reflect whether the remote cluster is available or not. The remote cluster may be available, while the local cluster is not currently connected to it.

You can use the _resolve/cluster API to attempt to reconnect to remote clusters. For example with GET _resolve/cluster or GET _resolve/cluster/*:*. The connected field in the response will indicate whether it was successful. If a connection was (re-)established, this will also cause the remote/info endpoint to now indicate a connected status.
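A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index patterns are illustrative)::

    resp = client.indices.resolve_cluster(name="my-index-*,cluster*:my-index-*")
    for cluster, info in resp.items():
        # `connected` and `matching_indices` help decide which clusters
        # to include in a subsequent cross-cluster search.
        print(cluster, info.get("connected"), info.get("matching_indices"))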

``_ :param name: A comma-separated list of names or index patterns for the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. Index and cluster exclusions (e.g., `-cluster1:*`) are also supported. If no index expression is specified, information about all remote clusters configured on the local cluster is returned without doing any index matching :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param ignore_unavailable: If false, the request returns an error if it targets a missing or closed index. NOTE: This option is only supported when specifying an index expression. You will get an error if you specify index options to the `_resolve/cluster` API endpoint that takes no index expression. :param timeout: The maximum time to wait for remote clusters to respond. If a remote cluster does not respond within this timeout period, the API response will show the cluster as not connected and include an error message that the request timed out. The default timeout is unset and the query can take as long as the networking layer is configured to wait for remote clusters that are not responding (typically 30 seconds). 
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_resolve/cluster/{__path_parts["name"]}' else: __path_parts = {} __path = "/_resolve/cluster" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.resolve_cluster", path_parts=__path_parts, ) @_rewrite_parameters() def resolve_index( self, *, name: t.Union[str, t.Sequence[str]], allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Resolve indices. Resolve the names and/or index patterns for indices, aliases, and data streams. Multiple patterns and remote clusters are supported.
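A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the pattern is illustrative)::

    resp = client.indices.resolve_index(name="my-index-*")
    # Matches are grouped into "indices", "aliases", and "data_streams".
    print(resp["indices"])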

``_ :param name: Comma-separated name(s) or index pattern(s) of the indices, aliases, and data streams to resolve. Resources on remote clusters can be specified using the ``:`` syntax. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. For example, a request targeting `foo*,bar*` returns an error if an index starts with `foo` but no index starts with `bar`. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_resolve/index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.resolve_index", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "conditions", "mappings", "settings"), ) def rollover( self, *, alias: str, new_index: t.Optional[str] = None, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, conditions: t.Optional[t.Mapping[str, t.Any]] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, lazy: t.Optional[bool] = None, mappings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Roll over to a new index. TIP: It is recommended to use the index lifecycle rollover action to automate rollovers.

The rollover API creates a new index for a data stream or index alias. The API behavior depends on the rollover target.

Roll over a data stream

If you roll over a data stream, the API creates a new write index for the stream. The stream's previous write index becomes a regular backing index. A rollover also increments the data stream's generation.

Roll over an index alias with a write index

TIP: Prior to Elasticsearch 7.9, you'd typically use an index alias with a write index to manage time series data. Data streams replace this functionality, require less maintenance, and automatically integrate with data tiers.

If an index alias points to multiple indices, one of the indices must be a write index. The rollover API creates a new write index for the alias with is_write_index set to true. The API also sets is_write_index to false for the previous write index.

Roll over an index alias with one index

If you roll over an index alias that points to only one index, the API creates a new index for the alias and removes the original index from the alias.

NOTE: A rollover creates a new index and is subject to the wait_for_active_shards setting.

Increment index names for an alias

When you roll over an index alias, you can specify a name for the new index. If you don't specify a name and the current index ends with - and a number, such as my-index-000001 or my-index-3, the new index name increments that number. For example, if you roll over an alias with a current index of my-index-000001, the rollover creates a new index named my-index-000002. This number is always six characters and zero-padded, regardless of the previous index's name.

If you use an index alias for time series data, you can use date math in the index name to track the rollover date. For example, you can create an alias that points to an index named <my-index-{now/d}-000001>. If you create the index on May 6, 2099, the index's name is my-index-2099.05.06-000001. If you roll over the alias on May 7, 2099, the new index's name is my-index-2099.05.07-000002.
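A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the alias name and conditions are illustrative)::

    resp = client.indices.rollover(
        alias="my-alias",
        conditions={"max_age": "7d", "max_docs": 100_000},
    )
    print(resp["rolled_over"], resp["new_index"])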

``_ :param alias: Name of the data stream or index alias to roll over. :param new_index: Name of the index to create. Supports date math. Data streams do not support this parameter. :param aliases: Aliases for the target index. Data streams do not support this parameter. :param conditions: Conditions for the rollover. If specified, Elasticsearch only performs the rollover if the current index satisfies these conditions. If this parameter is not specified, Elasticsearch performs the rollover unconditionally. If conditions are specified, at least one of them must be a `max_*` condition. The index will roll over if any `max_*` condition is satisfied and all `min_*` conditions are satisfied. :param dry_run: If `true`, checks whether the current index satisfies the specified conditions but does not perform a rollover. :param lazy: If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams. :param mappings: Mapping for fields in the index. If specified, this mapping can include field names, field data types, and mapping parameters. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the index. Data streams do not support this parameter. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if alias in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'alias'") __path_parts: t.Dict[str, str] if alias not in SKIP_IN_PATH and new_index not in SKIP_IN_PATH: __path_parts = {"alias": _quote(alias), "new_index": _quote(new_index)} __path = f'/{__path_parts["alias"]}/_rollover/{__path_parts["new_index"]}' elif alias not in SKIP_IN_PATH: __path_parts = {"alias": _quote(alias)} __path = f'/{__path_parts["alias"]}/_rollover' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if lazy is not None: __query["lazy"] = lazy if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if conditions is not None: __body["conditions"] = conditions if mappings is not None: __body["mappings"] = mappings if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.rollover", path_parts=__path_parts, ) @_rewrite_parameters() def
segments( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index segments. Get low-level information about the Lucene segments in index shards. For data streams, the API returns information about the stream's backing indices.
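A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index name is illustrative)::

    resp = client.indices.segments(index="my-index")
    # Per-shard Lucene segment details are nested under the "indices" key.
    print(resp["indices"]["my-index"]["shards"])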

``_ :param index: Comma-separated list of data streams, indices, and aliases used to limit the request. Supports wildcards (`*`). To target all data streams and indices, omit this parameter or use `*` or `_all`. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_segments' else: __path_parts = {} __path = "/_segments" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.segments", path_parts=__path_parts, ) @_rewrite_parameters() def shard_stores( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, status: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Literal["all", "green", "red", "yellow"]]], t.Union[str, t.Literal["all", "green", "red", "yellow"]], ] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index shard stores. Get store information about replica shards in one or more indices. For data streams, the API retrieves store information for the stream's backing indices.

The index shard stores API returns the following information:

  • The node on which each replica shard exists.
  • The allocation ID for each replica shard.
  • A unique ID for each replica shard.
  • Any errors encountered while opening the shard index or from an earlier failure.

By default, the API returns store information only for primary shards that are unassigned or have one or more unassigned replica shards.
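A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index name is illustrative)::

    # Request store information for shards of every health status,
    # not only the unassigned ones returned by default.
    resp = client.indices.shard_stores(index="my-index", status="all")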

``_ :param index: List of data streams, indices, and aliases used to limit the request. :param allow_no_indices: If false, the request returns an error if any wildcard expression, index alias, or _all value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. :param ignore_unavailable: If true, missing or closed indices are not included in the response. :param status: List of shard health statuses used to limit the request. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_shard_stores' else: __path_parts = {} __path = "/_shard_stores" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if status is not None: __query["status"] = status __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.shard_stores", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "settings"), ) def shrink( self, *, index: str, target: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Shrink an index. Shrink an index into a new index with fewer primary shards.

Before you can shrink an index:

  • The index must be read-only.
  • A copy of every shard in the index must reside on the same node.
  • The index must have a green health status.

To make shard allocation easier, we recommend you also remove the index's replica shards. You can later re-add replica shards as part of the shrink operation.

The requested number of primary shards in the target index must be a factor of the number of shards in the source index. For example, an index with 8 primary shards can be shrunk into 4, 2, or 1 primary shards, and an index with 15 primary shards can be shrunk into 5, 3, or 1. If the number of shards in the index is a prime number, it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node.

The current write index on a data stream cannot be shrunk. In order to shrink the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be shrunk.

A shrink operation:

  • Creates a new target index with the same definition as the source index, but with a smaller number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system does not support hard-linking, then all segments are copied into the new index, which is a much more time-consuming process. Also, if using multiple data paths, shards on different data paths require a full copy of segment files if they are not on the same disk, since hard links do not work across disks.
  • Recovers the target index as though it were a closed index which had just been re-opened. Recovers shards to the node specified by the index.routing.allocation.initial_recovery._id index setting.

IMPORTANT: Indices can only be shrunk if they satisfy the following requirements:

  • The target index must not exist.
  • The source index must have more primary shards than the target index.
  • The number of primary shards in the target index must be a factor of the number of primary shards in the source index.
  • The index must not contain more than 2,147,483,519 documents in total across all shards that will be shrunk into a single shard on the target index as this is the maximum number of docs that can fit into a single shard.
  • The node handling the shrink process must have sufficient free disk space to accommodate a second copy of the existing index.
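A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (index names and settings are illustrative); the source index must already be read-only with a copy of every shard on one node::

    client.indices.shrink(
        index="my-source-index",
        target="my-shrunken-index",
        settings={"index.number_of_shards": 1, "index.number_of_replicas": 1},
    )
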
``_ :param index: Name of the source index to shrink. :param target: Name of the target index to create. :param aliases: The key is the alias name. Index alias names support date math. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the target index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target": _quote(target), } __path = f'/{__path_parts["index"]}/_shrink/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.shrink", path_parts=__path_parts, ) @_rewrite_parameters() def simulate_index_template( self, *, name: str, cause: t.Optional[str] = None, create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_defaults: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate an index. Get the index configuration that would be applied to the specified index from an existing index template.
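A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index name is illustrative)::

    # Preview the settings, mappings, and aliases that "my-index-000001"
    # would receive from the highest-priority matching index template.
    resp = client.indices.simulate_index_template(name="my-index-000001")
    print(resp["template"])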

``_ :param name: Name of the index to simulate :param cause: User defined reason for dry-run creating the new template for simulation purposes :param create: Whether the index template we optionally defined in the body should only be dry-run added if new or can also replace an existing one :param include_defaults: If true, returns all relevant default configurations for the index template. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_index_template/_simulate_index/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="indices.simulate_index_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_auto_create", "composed_of", "data_stream", "deprecated", "ignore_missing_component_templates", "index_patterns", "meta", "priority", "template", "version", ), parameter_aliases={"_meta": "meta"}, ) def simulate_template( self, *, name: t.Optional[str] = None, allow_auto_create: t.Optional[bool] = None, cause: t.Optional[str] = None, composed_of: t.Optional[t.Sequence[str]] = None, create: t.Optional[bool] = None, data_stream: t.Optional[t.Mapping[str, t.Any]] = None, deprecated: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_missing_component_templates: t.Optional[t.Sequence[str]] = None, include_defaults: t.Optional[bool] = None, index_patterns: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, priority: t.Optional[int] = None, template: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate an index template. Get the index configuration that would be applied by a particular index template.
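A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the patterns and settings are illustrative)::

    # Simulate a template supplied inline, without storing it in the cluster.
    resp = client.indices.simulate_template(
        index_patterns=["my-index-*"],
        template={"settings": {"number_of_shards": 2}},
    )
    print(resp["template"])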

``_ :param name: Name of the index template to simulate. To test a template configuration before you add it to the cluster, omit this parameter and specify the template configuration in the request body. :param allow_auto_create: This setting overrides the value of the `action.auto_create_index` cluster setting. If set to `true` in a template, then indices can be automatically created using that template even if auto-creation of indices is disabled via `actions.auto_create_index`. If set to `false`, then indices or data streams matching the template must always be explicitly created, and may never be automatically created. :param cause: User defined reason for dry-run creating the new template for simulation purposes :param composed_of: An ordered list of component template names. Component templates are merged in the order specified, meaning that the last component template specified has the highest precedence. :param create: If true, the template passed in the body is only used if no existing templates match the same index patterns. If false, the simulation uses the template with the highest priority. Note that the template is not permanently added or updated in either case; it is only used for the simulation. :param data_stream: If this object is included, the template is used to create data streams and their backing indices. Supports an empty object. Data streams require a matching index template with a `data_stream` object. :param deprecated: Marks this index template as deprecated. When creating or updating a non-deprecated index template that uses deprecated components, Elasticsearch will emit a deprecation warning. :param ignore_missing_component_templates: The configuration option ignore_missing_component_templates can be used when an index template references a component template that might not exist :param include_defaults: If true, returns all relevant default configurations for the index template. :param index_patterns: Array of wildcard (`*`) expressions used to match the names of data streams and indices during creation. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional user metadata about the index template. May have any contents. This map is not automatically generated by Elasticsearch. :param priority: Priority to determine index template precedence when a new data stream or index is created. The index template with the highest priority is chosen. If no priority is specified the template is treated as though it is of priority 0 (lowest priority). This number is not automatically generated by Elasticsearch. :param template: Template to be applied. It may optionally include an `aliases`, `mappings`, or `settings` configuration. :param version: Version number used to manage index templates externally. This number is not automatically generated by Elasticsearch. 
""" __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_index_template/_simulate/{__path_parts["name"]}' else: __path_parts = {} __path = "/_index_template/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cause is not None: __query["cause"] = cause if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if include_defaults is not None: __query["include_defaults"] = include_defaults if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if allow_auto_create is not None: __body["allow_auto_create"] = allow_auto_create if composed_of is not None: __body["composed_of"] = composed_of if data_stream is not None: __body["data_stream"] = data_stream if deprecated is not None: __body["deprecated"] = deprecated if ignore_missing_component_templates is not None: __body["ignore_missing_component_templates"] = ( ignore_missing_component_templates ) if index_patterns is not None: __body["index_patterns"] = index_patterns if meta is not None: __body["_meta"] = meta if priority is not None: __body["priority"] = priority if template is not None: __body["template"] = template if version is not None: __body["version"] = version if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.simulate_template", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aliases", "settings"), ) def split( self, *, index: str, target: str, aliases: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_active_shards: t.Optional[ t.Union[int, t.Union[str, t.Literal["all", "index-setting"]]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Split an index. Split an index into a new index with more primary shards.

Before you can split an index:

  • The index must be read-only.
  • The cluster health status must be green.

You can make an index read-only with the following request using the add index block API:

PUT /my_source_index/_block/write

The current write index on a data stream cannot be split. In order to split the current write index, the data stream must first be rolled over so that a new write index is created and then the previous write index can be split.

The number of times the index can be split (and the number of shards that each original shard can be split into) is determined by the index.number_of_routing_shards setting. The number of routing shards specifies the hashing space that is used internally to distribute documents across shards with consistent hashing. For instance, a 5 shard index with number_of_routing_shards set to 30 (5 x 2 x 3) could be split by a factor of 2 or 3.

A split operation:

  • Creates a new target index with the same definition as the source index, but with a larger number of primary shards.
  • Hard-links segments from the source index into the target index. If the file system doesn't support hard-linking, all segments are copied into the new index, which is a much more time consuming process.
  • Hashes all documents again, after low-level files are created, to delete documents that belong to a different shard.
  • Recovers the target index as though it were a closed index which had just been re-opened.

IMPORTANT: Indices can only be split if they satisfy the following requirements:

  • The target index must not exist.
  • The source index must have fewer primary shards than the target index.
  • The number of primary shards in the target index must be a multiple of the number of primary shards in the source index.
  • The node handling the split process must have sufficient free disk space to accommodate a second copy of the existing index.
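A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (index names and the shard count are illustrative); the target shard count must be a multiple of the source's primary shard count::

    client.indices.split(
        index="my-source-index",
        target="my-target-index",
        settings={"index.number_of_shards": 10},
    )
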
``_ :param index: Name of the source index to split. :param target: Name of the target index to create. :param aliases: Aliases for the resulting index. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param settings: Configuration options for the target index. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_active_shards: The number of shard copies that must be active before proceeding with the operation. Set to `all` or any positive integer up to the total number of shards in the index (`number_of_replicas+1`). """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") if target in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target'") __path_parts: t.Dict[str, str] = { "index": _quote(index), "target": _quote(target), } __path = f'/{__path_parts["index"]}/_split/{__path_parts["target"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_active_shards is not None: __query["wait_for_active_shards"] = wait_for_active_shards if not __body: if aliases is not None: __body["aliases"] = aliases if settings is not None: __body["settings"] = settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.split", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, forbid_closed_indices: t.Optional[bool] = None, groups: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, include_segment_file_sizes: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get index statistics. For data streams, the API retrieves statistics for the stream's backing indices.

By default, the returned statistics are index-level with primaries and total aggregations. primaries are the values for only the primary shards. total are the accumulated values for both primary and replica shards.

To get shard-level statistics, set the level parameter to shards.

NOTE: When moving to another node, the shard-level statistics for a shard are cleared. Although the shard is no longer part of the node, that node retains any node-level statistics to which the shard contributed.
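A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index name is illustrative)::

    resp = client.indices.stats(index="my-index", level="shards")
    # Aggregated document counts for primary shards only.
    print(resp["_all"]["primaries"]["docs"])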

``_ :param index: A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices :param metric: Limit the information returned to the specific metrics. :param completion_fields: Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param fielddata_fields: Comma-separated list or wildcard expressions of fields to include in fielddata statistics. :param fields: Comma-separated list or wildcard expressions of fields to include in the statistics. :param forbid_closed_indices: If true, statistics are not collected from closed indices. :param groups: Comma-separated list of search groups to include in the search statistics. :param include_segment_file_sizes: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). :param include_unloaded_segments: If true, the response includes information from segments that are not loaded into memory. :param level: Indicates whether statistics are aggregated at the cluster, index, or shard level. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"index": _quote(index), "metric": _quote(metric)} __path = f'/{__path_parts["index"]}/_stats/{__path_parts["metric"]}' elif index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_stats' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_stats/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_stats" __query: t.Dict[str, t.Any] = {} if completion_fields is not None: __query["completion_fields"] = completion_fields if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if fielddata_fields is not None: __query["fielddata_fields"] = fielddata_fields if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if forbid_closed_indices is not None: __query["forbid_closed_indices"] = forbid_closed_indices if groups is not None: __query["groups"] = groups if human is not None: __query["human"] = human if include_segment_file_sizes is not None: __query["include_segment_file_sizes"] = include_segment_file_sizes if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if level is not None: __query["level"] = level if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="indices.stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("actions",), ) def update_aliases( self, *, actions: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) ->
ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an alias. Adds a data stream or index to an alias.
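A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (index and alias names are illustrative)::

    # Atomically swap an alias from an old index to a new one.
    client.indices.update_aliases(
        actions=[
            {"remove": {"index": "my-index-000001", "alias": "my-alias"}},
            {"add": {"index": "my-index-000002", "alias": "my-alias"}},
        ]
    )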

``_ :param actions: Actions to perform. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_aliases" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if actions is not None: __body["actions"] = actions __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.update_aliases", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query",), ) def validate_query( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, all_shards: t.Optional[bool] = None, allow_no_indices: t.Optional[bool] = None, analyze_wildcard: t.Optional[bool] = None, analyzer: t.Optional[str] = None, default_operator: t.Optional[t.Union[str, t.Literal["and", "or"]]] = None, df: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, lenient: t.Optional[bool] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, rewrite: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Validate a query. Validates a query without running it.
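A minimal usage sketch, assuming a connected `Elasticsearch` client named `client` (the index and query are illustrative)::

    resp = client.indices.validate_query(
        index="my-index",
        query={"match": {"user.id": "kimchy"}},
        explain=True,
    )
    print(resp["valid"])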

``_ :param index: Comma-separated list of data streams, indices, and aliases to search. Supports wildcards (`*`). To search all data streams or indices, omit this parameter or use `*` or `_all`. :param all_shards: If `true`, the validation is executed on all shards instead of one random shard per index. :param allow_no_indices: If `false`, the request returns an error if any wildcard expression, index alias, or `_all` value targets only missing or closed indices. This behavior applies even if the request targets other open indices. :param analyze_wildcard: If `true`, wildcard and prefix queries are analyzed. :param analyzer: Analyzer to use for the query string. This parameter can only be used when the `q` query string parameter is specified. :param default_operator: The default operator for query string query: `AND` or `OR`. :param df: Field to use as default where no field prefix is given in the query string. This parameter can only be used when the `q` query string parameter is specified. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values, such as `open,hidden`. :param explain: If `true`, the response returns detailed information if an error has occurred. :param ignore_unavailable: If `false`, the request returns an error if it targets a missing or closed index. :param lenient: If `true`, format-based query failures (such as providing text to a numeric field) in the query string will be ignored. :param q: Query in the Lucene query string syntax. :param query: The query definition specified using the Query DSL. :param rewrite: If `true`, returns a more detailed explanation showing the actual Lucene query that will be executed.
""" __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_validate/query' else: __path_parts = {} __path = "/_validate/query" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if all_shards is not None: __query["all_shards"] = all_shards if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if analyze_wildcard is not None: __query["analyze_wildcard"] = analyze_wildcard if analyzer is not None: __query["analyzer"] = analyzer if default_operator is not None: __query["default_operator"] = default_operator if df is not None: __query["df"] = df if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if lenient is not None: __query["lenient"] = lenient if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if rewrite is not None: __query["rewrite"] = rewrite if not __body: if query is not None: __body["query"] = query if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="indices.validate_query", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/inference.py000066400000000000000000003632541506101734100254010ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class InferenceClient(NamespacedClient): @_rewrite_parameters( body_fields=("input", "task_settings"), ) def completion( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform completion inference on the service
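A minimal usage sketch (the endpoint id is hypothetical and must already exist; `client` is an `Elasticsearch` instance as in the earlier sketch):

    resp = client.inference.completion(
        inference_id="my-completion-endpoint",  # hypothetical endpoint id
        input="What is Elasticsearch?",
        timeout="30s",
    )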

``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. :param task_settings: Optional task settings :param timeout: Specifies the amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/completion/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.completion", path_parts=__path_parts, ) @_rewrite_parameters() def delete( self, *, inference_id: str, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an inference endpoint
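For example, a dry run reports the ingest processors that still reference the endpoint instead of deleting it (the endpoint id is illustrative):

    resp = client.inference.delete(
        inference_id="my-completion-endpoint",  # hypothetical endpoint id
        dry_run=True,  # list referencing ingest processors; nothing is deleted
    )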

``_ :param inference_id: The inference identifier. :param task_type: The task type :param dry_run: When true, the endpoint is not deleted and a list of ingest processors which reference this endpoint is returned. :param force: When true, the inference endpoint is forcefully deleted even if it is still being used by ingest processors or semantic text fields. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="inference.delete", path_parts=__path_parts, ) @_rewrite_parameters() def get( self, *, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, inference_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get an inference endpoint
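For example, calling it with no arguments lists every configured endpoint, while passing an id fetches a single one (the id is illustrative):

    all_endpoints = client.inference.get()
    one_endpoint = client.inference.get(inference_id="my-completion-endpoint")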

``_ :param task_type: The task type :param inference_id: The inference Id """ __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: __path_parts = {} __path = "/_inference" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="inference.get", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "input_type", "query", "task_settings"), ) def inference( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, input_type: t.Optional[str] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform inference on the service.

This API enables you to use machine learning models to perform specific tasks on data that you provide as an input. It returns a response with the results of the tasks. The inference endpoint you use can perform one specific task that has been defined when the endpoint was created with the create inference API.

For details about using this API with a service, such as Amazon Bedrock, Anthropic, or HuggingFace, refer to the service-specific documentation.

info The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
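A sketch of a rerank request (the endpoint id and documents are illustrative; `query` applies only to the `rerank` task type):

    resp = client.inference.inference(
        inference_id="my-rerank-endpoint",  # hypothetical endpoint id
        task_type="rerank",
        query="best pizza in town",
        input=["Pepperoni pizza review", "Bicycle repair guide"],
    )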

``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. :param task_type: The type of inference task that the model performs. :param input_type: Specifies the input data type for the text embedding model. The `input_type` parameter only applies to Inference Endpoints with the `text_embedding` task type. Possible values include: * `SEARCH` * `INGEST` * `CLASSIFICATION` * `CLUSTERING` Not all services support all values. Unsupported values will trigger a validation exception. Accepted values depend on the configured inference service, refer to the relevant service-specific documentation for more info. > info > The `input_type` parameter specified on the root level of the request body will take precedence over the `input_type` parameter specified in `task_settings`. :param query: The query input, which is required only for the `rerank` task. It is not required for other tasks. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. :param timeout: The amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if input_type is not None: __body["input_type"] = input_type if query is not None: __body["query"] = query if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.inference", path_parts=__path_parts, ) @_rewrite_parameters( body_name="inference_config", ) def put( self, *, inference_id: str, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, 
pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an inference endpoint.

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Mistral, Azure OpenAI, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.

The following integrations are available through the inference API. You can find the available task types next to the integration name:

  • AlibabaCloud AI Search (completion, rerank, sparse_embedding, text_embedding)
  • Amazon Bedrock (completion, text_embedding)
  • Amazon SageMaker (chat_completion, completion, rerank, sparse_embedding, text_embedding)
  • Anthropic (completion)
  • Azure AI Studio (completion, text_embedding)
  • Azure OpenAI (completion, text_embedding)
  • Cohere (completion, rerank, text_embedding)
  • DeepSeek (chat_completion, completion)
  • Elasticsearch (rerank, sparse_embedding, text_embedding - this service is for built-in models and models uploaded through Eland)
  • ELSER (sparse_embedding)
  • Google AI Studio (completion, text_embedding)
  • Google Vertex AI (chat_completion, completion, rerank, text_embedding)
  • Hugging Face (chat_completion, completion, rerank, text_embedding)
  • JinaAI (rerank, text_embedding)
  • Llama (chat_completion, completion, text_embedding)
  • Mistral (chat_completion, completion, text_embedding)
  • OpenAI (chat_completion, completion, text_embedding)
  • VoyageAI (rerank, text_embedding)
  • Watsonx inference integration (text_embedding)
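As a sketch, the generic `put` takes the whole endpoint definition as one `inference_config` mapping; the service and settings shown are placeholders, not defaults:

    resp = client.inference.put(
        task_type="text_embedding",
        inference_id="my-embedding-endpoint",  # hypothetical endpoint id
        inference_config={
            "service": "openai",  # any integration from the list above
            "service_settings": {  # keys depend on the chosen service
                "api_key": "<api-key>",
                "model_id": "text-embedding-3-small",
            },
        },
    )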
``_ :param inference_id: The inference Id :param inference_config: :param task_type: The task type. Refer to the integration list in the API description for the available task types. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if inference_config is None and body is None: raise ValueError( "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." ) elif inference_config is not None and body is not None: raise ValueError("Cannot set both 'inference_config' and 'body'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_alibabacloud( self, *, task_type: t.Union[ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"] ], alibabacloud_inference_id: str, service: t.Optional[t.Union[str, t.Literal["alibabacloud-ai-search"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an AlibabaCloud AI Search inference endpoint.

Create an inference endpoint to perform an inference task with the alibabacloud-ai-search service.
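A minimal sketch; the endpoint id and the `service_settings` keys below are illustrative placeholders for values described in the service documentation:

    resp = client.inference.put_alibabacloud(
        task_type="text_embedding",
        alibabacloud_inference_id="alibabacloud-embeddings",  # hypothetical id
        service="alibabacloud-ai-search",
        service_settings={  # illustrative keys; consult the service docs
            "api_key": "<api-key>",
            "service_id": "<service-id>",
            "host": "<host>",
            "workspace": "<workspace>",
        },
    )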

``_ :param task_type: The type of the inference task that the model will perform. :param alibabacloud_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `alibabacloud-ai-search`. :param service_settings: Settings used to install the inference model. These settings are specific to the `alibabacloud-ai-search` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if alibabacloud_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'alibabacloud_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "alibabacloud_inference_id": _quote(alibabacloud_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["alibabacloud_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_alibabacloud", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_amazonbedrock( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], amazonbedrock_inference_id: str, service: t.Optional[t.Union[str, t.Literal["amazonbedrock"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Amazon Bedrock inference endpoint.

Create an inference endpoint to perform an inference task with the amazonbedrock service.

info You need to provide the access and secret keys only once, during the inference model creation. The get inference API does not retrieve your access or secret keys. After creating the inference model, you cannot change the associated key pairs. If you want to use a different access and secret key pair, delete the inference model and recreate it with the same name and the updated keys.
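A hedged sketch (the region, provider, and model values are placeholders, and the settings keys are assumptions about this service):

    resp = client.inference.put_amazonbedrock(
        task_type="completion",
        amazonbedrock_inference_id="bedrock-completion",  # hypothetical id
        service="amazonbedrock",
        service_settings={  # illustrative keys and values
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
            "region": "us-east-1",
            "provider": "amazontitan",
            "model": "amazon.titan-text-express-v1",
        },
    )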

``_ :param task_type: The type of the inference task that the model will perform. :param amazonbedrock_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `amazonbedrock`. :param service_settings: Settings used to install the inference model. These settings are specific to the `amazonbedrock` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if amazonbedrock_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'amazonbedrock_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "amazonbedrock_inference_id": _quote(amazonbedrock_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonbedrock_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_amazonbedrock", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_amazonsagemaker( self, *, task_type: t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ], amazonsagemaker_inference_id: str, service: t.Optional[t.Union[str, t.Literal["amazon_sagemaker"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Amazon SageMaker inference endpoint.

Create an inference endpoint to perform an inference task with the amazon_sagemaker service.
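A hedged sketch; which `service_settings` keys are required depends on the `service_settings.api` you pick, so everything below is illustrative:

    resp = client.inference.put_amazonsagemaker(
        task_type="text_embedding",
        amazonsagemaker_inference_id="sagemaker-embeddings",  # hypothetical id
        service="amazon_sagemaker",
        service_settings={  # illustrative keys and values
            "access_key": "<aws-access-key>",
            "secret_key": "<aws-secret-key>",
            "region": "us-east-1",
            "api": "openai",
            "endpoint_name": "<sagemaker-endpoint-name>",
        },
    )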

``_ :param task_type: The type of the inference task that the model will perform. :param amazonsagemaker_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `amazon_sagemaker`. :param service_settings: Settings used to install the inference model. These settings are specific to the `amazon_sagemaker` service and `service_settings.api` you specified. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type and `service_settings.api` you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if amazonsagemaker_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'amazonsagemaker_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "amazonsagemaker_inference_id": _quote(amazonsagemaker_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["amazonsagemaker_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_amazonsagemaker", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_anthropic( self, *, task_type: t.Union[str, t.Literal["completion"]], anthropic_inference_id: str, service: t.Optional[t.Union[str, t.Literal["anthropic"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Anthropic inference endpoint.

Create an inference endpoint to perform an inference task with the anthropic service.
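A minimal sketch (the model id and token limit are illustrative values, not defaults):

    resp = client.inference.put_anthropic(
        task_type="completion",  # the only valid task type for this service
        anthropic_inference_id="anthropic-completion",  # hypothetical id
        service="anthropic",
        service_settings={  # illustrative values
            "api_key": "<api-key>",
            "model_id": "claude-3-5-sonnet-latest",
        },
        task_settings={"max_tokens": 1024},  # illustrative limit
    )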

``_ :param task_type: The task type. The only valid task type for the model to perform is `completion`. :param anthropic_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `anthropic`. :param service_settings: Settings used to install the inference model. These settings are specific to the `anthropic` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if anthropic_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'anthropic_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "anthropic_inference_id": _quote(anthropic_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["anthropic_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_anthropic", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_azureaistudio( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], azureaistudio_inference_id: str, service: t.Optional[t.Union[str, t.Literal["azureaistudio"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Azure AI Studio inference endpoint.

Create an inference endpoint to perform an inference task with the azureaistudio service.
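A hedged sketch (the target, provider, and endpoint type are placeholders for deployment-specific values):

    resp = client.inference.put_azureaistudio(
        task_type="completion",
        azureaistudio_inference_id="azure-ai-studio-completion",  # hypothetical id
        service="azureaistudio",
        service_settings={  # illustrative keys and values
            "api_key": "<api-key>",
            "target": "<deployment-target-uri>",
            "provider": "openai",
            "endpoint_type": "token",
        },
    )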

``_ :param task_type: The type of the inference task that the model will perform. :param azureaistudio_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `azureaistudio`. :param service_settings: Settings used to install the inference model. These settings are specific to the `azureaistudio` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if azureaistudio_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'azureaistudio_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "azureaistudio_inference_id": _quote(azureaistudio_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureaistudio_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_azureaistudio", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_azureopenai( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], azureopenai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["azureopenai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Azure OpenAI inference endpoint.

Create an inference endpoint to perform an inference task with the azureopenai service.

The list of chat completion models that you can choose from in your Azure OpenAI deployment includes:

  • GPT-4 and GPT-4 Turbo models
  • GPT-3.5

The list of embeddings models that you can choose from in your deployment can be found in the Azure models documentation.
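A hedged sketch (the resource, deployment, and API version are placeholders for your Azure deployment):

    resp = client.inference.put_azureopenai(
        task_type="text_embedding",
        azureopenai_inference_id="azure-openai-embeddings",  # hypothetical id
        service="azureopenai",
        service_settings={  # illustrative keys and values
            "api_key": "<api-key>",
            "resource_name": "<azure-resource-name>",
            "deployment_id": "<deployment-id>",
            "api_version": "2024-02-01",
        },
    )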

``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. :param azureopenai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `azureopenai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `azureopenai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if azureopenai_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'azureopenai_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "azureopenai_inference_id": _quote(azureopenai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["azureopenai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_azureopenai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_cohere( self, *, task_type: t.Union[str, t.Literal["completion", "rerank", "text_embedding"]], cohere_inference_id: str, service: t.Optional[t.Union[str, t.Literal["cohere"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Cohere inference endpoint.

Create an inference endpoint to perform an inference task with the cohere service.
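A minimal sketch (the model id is an illustrative Cohere rerank model, not a default):

    resp = client.inference.put_cohere(
        task_type="rerank",
        cohere_inference_id="cohere-rerank",  # hypothetical id
        service="cohere",
        service_settings={  # illustrative values
            "api_key": "<api-key>",
            "model_id": "rerank-english-v3.0",
        },
    )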

``_ :param task_type: The type of the inference task that the model will perform. :param cohere_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `cohere`. :param service_settings: Settings used to install the inference model. These settings are specific to the `cohere` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if cohere_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'cohere_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "cohere_inference_id": _quote(cohere_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["cohere_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_cohere", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_custom( self, *, task_type: t.Union[ str, t.Literal["completion", "rerank", "sparse_embedding", "text_embedding"] ], custom_inference_id: str, service: t.Optional[t.Union[str, t.Literal["custom"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a custom inference endpoint.

The custom service gives more control over how to interact with external inference services that aren't explicitly supported through dedicated integrations: it lets you define the headers, url, query parameters, request body, and secrets. The custom service supports template replacement, which substitutes a template with the value associated with its key. Templates are portions of a string that start with ${ and end with }. The parameters secret_parameters and task_settings are checked for keys for template replacement, and replacement is supported in the request, headers, url, and query_parameters. If the definition (key) is not found for a template, an error message is returned. Given an endpoint definition like the following:

PUT _inference/text_embedding/test-text-embedding
          {
            "service": "custom",
            "service_settings": {
               "secret_parameters": {
                    "api_key": "<some api key>"
               },
               "url": "...endpoints.huggingface.cloud/v1/embeddings",
               "headers": {
                   "Authorization": "Bearer ${api_key}",
                   "Content-Type": "application/json"
               },
               "request": "{\\"input\\": ${input}}",
               "response": {
                   "json_parser": {
                       "text_embeddings":"$.data[*].embedding[*]"
                   }
               }
            }
          }
          

To replace ${api_key}, the secret_parameters and task_settings are checked for a key named api_key.

info Templates should not be surrounded by quotes.

Pre-defined templates:

  • ${input} refers to the array of input strings that comes from the input field of the subsequent inference requests.
  • ${input_type} refers to the input type translation values.
  • ${query} refers to the query field used specifically for reranking tasks.
  • ${top_n} refers to the top_n field available when performing rerank requests.
  • ${return_documents} refers to the return_documents field available when performing rerank requests.
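The same definition can be sent through this client method; the mapping below simply mirrors the endpoint definition shown above, with the same placeholder values:

    resp = client.inference.put_custom(
        task_type="text_embedding",
        custom_inference_id="test-text-embedding",
        service="custom",
        service_settings={
            "secret_parameters": {"api_key": "<some api key>"},
            "url": "...endpoints.huggingface.cloud/v1/embeddings",
            "headers": {
                "Authorization": "Bearer ${api_key}",
                "Content-Type": "application/json",
            },
            "request": '{"input": ${input}}',
            "response": {
                "json_parser": {"text_embeddings": "$.data[*].embedding[*]"}
            },
        },
    )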
``_ :param task_type: The type of the inference task that the model will perform. :param custom_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `custom`. :param service_settings: Settings used to install the inference model. These settings are specific to the `custom` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if custom_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'custom_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "custom_inference_id": _quote(custom_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["custom_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_custom", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) def put_deepseek( self, *, task_type: t.Union[str, t.Literal["chat_completion", "completion"]], deepseek_inference_id: str, service: t.Optional[t.Union[str, t.Literal["deepseek"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a DeepSeek inference endpoint.

Create an inference endpoint to perform an inference task with the deepseek service.
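A minimal sketch (the model id is illustrative):

    resp = client.inference.put_deepseek(
        task_type="chat_completion",
        deepseek_inference_id="deepseek-chat",  # hypothetical id
        service="deepseek",
        service_settings={  # illustrative values
            "api_key": "<api-key>",
            "model_id": "deepseek-chat",
        },
    )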

``_ :param task_type: The type of the inference task that the model will perform. :param deepseek_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `deepseek`. :param service_settings: Settings used to install the inference model. These settings are specific to the `deepseek` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if deepseek_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'deepseek_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "deepseek_inference_id": _quote(deepseek_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["deepseek_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_deepseek", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_elasticsearch( self, *, task_type: t.Union[ str, t.Literal["rerank", "sparse_embedding", "text_embedding"] ], elasticsearch_inference_id: str, service: t.Optional[t.Union[str, t.Literal["elasticsearch"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an Elasticsearch inference endpoint.

Create an inference endpoint to perform an inference task with the elasticsearch service.

info Your Elasticsearch deployment contains preconfigured ELSER and E5 inference endpoints; you only need to create endpoints through the API if you want to customize the settings.

If you use the ELSER or the E5 model through the elasticsearch service, the API request will automatically download and deploy the model if it isn't downloaded yet.

info You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
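A sketch that deploys the built-in E5 model through this service; the allocation sizing is illustrative, and the endpoint id deliberately differs from the model_id:

    resp = client.inference.put_elasticsearch(
        task_type="text_embedding",
        elasticsearch_inference_id="my-e5-endpoint",  # must not match the model_id
        service="elasticsearch",
        service_settings={  # illustrative sizing
            "model_id": ".multilingual-e5-small",
            "num_allocations": 1,
            "num_threads": 1,
        },
        timeout="5m",  # allow time for the model download described above
    )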

``_ :param task_type: The type of the inference task that the model will perform. :param elasticsearch_inference_id: The unique identifier of the inference endpoint. It must not match the `model_id`. :param service: The type of service supported for the specified task type. In this case, `elasticsearch`. :param service_settings: Settings used to install the inference model. These settings are specific to the `elasticsearch` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if elasticsearch_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'elasticsearch_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "elasticsearch_inference_id": _quote(elasticsearch_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elasticsearch_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_elasticsearch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) def put_elser( self, *, task_type: t.Union[str, t.Literal["sparse_embedding"]], elser_inference_id: str, service: t.Optional[t.Union[str, t.Literal["elser"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an ELSER inference endpoint.

Create an inference endpoint to perform an inference task with the elser service. You can also deploy ELSER by using the Elasticsearch inference integration.

info Your Elasticsearch deployment contains a preconfigured ELSER inference endpoint; you only need to create an endpoint through the API if you want to customize the settings.

The API request will automatically download and deploy the ELSER model if it isn't already downloaded.

info You might see a 502 bad gateway error in the response when using the Kibana Console. This error usually just reflects a timeout, while the model downloads in the background. You can check the download progress in the Machine Learning UI. If using the Python client, you can set the timeout parameter to a higher value.

After creating the endpoint, wait for the model deployment to complete before using it. To verify the deployment status, use the get trained model statistics API. Look for "state": "fully_allocated" in the response and ensure that the "allocation_count" matches the "target_allocation_count". Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
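A minimal sketch (the allocation sizing is illustrative; the raised timeout follows the advice above):

    resp = client.inference.put_elser(
        task_type="sparse_embedding",
        elser_inference_id="my-elser-endpoint",  # hypothetical id
        service="elser",
        service_settings={"num_allocations": 1, "num_threads": 1},  # illustrative sizing
        timeout="5m",  # allow time for the model download
    )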

``_ :param task_type: The type of the inference task that the model will perform. :param elser_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `elser`. :param service_settings: Settings used to install the inference model. These settings are specific to the `elser` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if elser_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'elser_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "elser_inference_id": _quote(elser_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["elser_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_elser", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) def put_googleaistudio( self, *, task_type: t.Union[str, t.Literal["completion", "text_embedding"]], googleaistudio_inference_id: str, service: t.Optional[t.Union[str, t.Literal["googleaistudio"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Google AI Studio inference endpoint.

Create an inference endpoint to perform an inference task with the googleaistudio service.
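A minimal sketch (the model id is an illustrative Gemini model, not a default):

    resp = client.inference.put_googleaistudio(
        task_type="completion",
        googleaistudio_inference_id="google-ai-studio-completion",  # hypothetical id
        service="googleaistudio",
        service_settings={  # illustrative values
            "api_key": "<api-key>",
            "model_id": "gemini-1.5-flash",
        },
    )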

``_ :param task_type: The type of the inference task that the model will perform. :param googleaistudio_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `googleaistudio`. :param service_settings: Settings used to install the inference model. These settings are specific to the `googleaistudio` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if googleaistudio_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'googleaistudio_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "googleaistudio_inference_id": _quote(googleaistudio_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googleaistudio_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_googleaistudio", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_googlevertexai( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] ], googlevertexai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["googlevertexai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Google Vertex AI inference endpoint.

Create an inference endpoint to perform an inference task with the googlevertexai service.
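A minimal usage sketch (the endpoint id, model id, and service-setting keys below are assumptions based on the Google Vertex AI service docs, not values from this file):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a text_embedding endpoint backed by Google Vertex AI.
resp = client.inference.put_googlevertexai(
    task_type="text_embedding",
    googlevertexai_inference_id="google-vertex-ai-embeddings",
    service="googlevertexai",
    service_settings={
        # Setting names assumed from the Vertex AI service docs.
        "service_account_json": "<service-account-json>",
        "model_id": "text-embedding-005",
        "location": "us-central1",
        "project_id": "<gcp-project-id>",
    },
)
print(resp)
```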

``_ :param task_type: The type of the inference task that the model will perform. :param googlevertexai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `googlevertexai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `googlevertexai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if googlevertexai_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'googlevertexai_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "googlevertexai_inference_id": _quote(googlevertexai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["googlevertexai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_googlevertexai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_hugging_face( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "rerank", "text_embedding"] ], huggingface_inference_id: str, service: t.Optional[t.Union[str, t.Literal["hugging_face"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Hugging Face inference endpoint.

Create an inference endpoint to perform an inference task with the hugging_face service. Supported tasks include text_embedding, rerank, completion, and chat_completion.

To configure the endpoint, first visit the Hugging Face Inference Endpoints page and create a new endpoint. Select a model that supports the task you intend to use.

For Elastic's text_embedding task: The selected model must support the Sentence Embeddings task. On the new endpoint creation page, select the Sentence Embeddings task under the Advanced Configuration section. After the endpoint has initialized, copy the generated endpoint URL. Recommended models for the text_embedding task:

  • all-MiniLM-L6-v2
  • all-MiniLM-L12-v2
  • all-mpnet-base-v2
  • e5-base-v2
  • e5-small-v2
  • multilingual-e5-base
  • multilingual-e5-small

For Elastic's chat_completion and completion tasks: The selected model must support the Text Generation task and expose an OpenAI-compatible API. Hugging Face supports both serverless and dedicated endpoints for Text Generation. When creating a dedicated endpoint, select the Text Generation task. After the endpoint is initialized (for dedicated) or ready (for serverless), ensure it supports the OpenAI API and that the URL includes the /v1/chat/completions path. Then copy the full endpoint URL for use. Recommended models for the chat_completion and completion tasks:

  • Mistral-7B-Instruct-v0.2
  • QwQ-32B
  • Phi-3-mini-128k-instruct

For Elastic's rerank task: The selected model must support the sentence-ranking task and expose an OpenAI-compatible API. Hugging Face currently supports only dedicated (not serverless) endpoints for Rerank. After the endpoint is initialized, copy the full endpoint URL for use; a usage sketch follows the list below. Tested models for the rerank task:

  • bge-reranker-base
  • jina-reranker-v1-turbo-en-GGUF
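As referenced above, a minimal sketch of creating a text_embedding endpoint once you have copied the endpoint URL; the `api_key`/`url` setting names are assumptions based on the hugging_face service docs:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a text_embedding endpoint that calls a Hugging Face endpoint URL.
resp = client.inference.put_hugging_face(
    task_type="text_embedding",
    huggingface_inference_id="hugging-face-embeddings",
    service="hugging_face",
    service_settings={
        # Setting names assumed from the hugging_face service docs.
        "api_key": "<hugging-face-access-token>",
        "url": "<endpoint-url-copied-from-hugging-face>",
    },
)
print(resp)
```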
``_ :param task_type: The type of the inference task that the model will perform. :param huggingface_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `hugging_face`. :param service_settings: Settings used to install the inference model. These settings are specific to the `hugging_face` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if huggingface_inference_id in SKIP_IN_PATH: raise ValueError( "Empty value passed for parameter 'huggingface_inference_id'" ) if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "huggingface_inference_id": _quote(huggingface_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["huggingface_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_hugging_face", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_jinaai( self, *, task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], jinaai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["jinaai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a JinaAI inference endpoint.

Create an inference endpoint to perform an inference task with the jinaai service.

To review the available rerank models, refer to https://jina.ai/reranker. To review the available text_embedding models, refer to https://jina.ai/embeddings/.
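A minimal usage sketch (the endpoint id, model id, and setting names are assumptions based on the jinaai service docs):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a text_embedding endpoint backed by JinaAI.
resp = client.inference.put_jinaai(
    task_type="text_embedding",
    jinaai_inference_id="jinaai-embeddings",
    service="jinaai",
    service_settings={
        "api_key": "<jinaai-api-key>",     # assumed setting name
        "model_id": "jina-embeddings-v3",  # assumed model id
    },
)
print(resp)
```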

``_ :param task_type: The type of the inference task that the model will perform. :param jinaai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `jinaai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `jinaai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if jinaai_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'jinaai_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "jinaai_inference_id": _quote(jinaai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["jinaai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_jinaai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings", "chunking_settings"), ) def put_mistral( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "text_embedding"] ], mistral_inference_id: str, service: t.Optional[t.Union[str, t.Literal["mistral"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Mistral inference endpoint.

Create an inference endpoint to perform an inference task with the mistral service.
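A minimal usage sketch (the endpoint id, model name, and the `api_key`/`model` setting keys are assumptions based on the mistral service docs):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a text_embedding endpoint backed by Mistral.
resp = client.inference.put_mistral(
    task_type="text_embedding",
    mistral_inference_id="mistral-embeddings",
    service="mistral",
    service_settings={
        "api_key": "<mistral-api-key>",  # assumed setting name
        "model": "mistral-embed",        # assumed model name
    },
)
print(resp)
```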

``_ :param task_type: The type of the inference task that the model will perform. :param mistral_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `mistral`. :param service_settings: Settings used to install the inference model. These settings are specific to the `mistral` service. :param chunking_settings: The chunking configuration object. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if mistral_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'mistral_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "mistral_inference_id": _quote(mistral_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["mistral_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_mistral", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_openai( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "text_embedding"] ], openai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["openai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an OpenAI inference endpoint.

Create an inference endpoint to perform an inference task with the openai service or OpenAI-compatible APIs.
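A minimal usage sketch (the endpoint id, model id, and setting names are assumptions based on the openai service docs):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a text_embedding endpoint backed by OpenAI.
resp = client.inference.put_openai(
    task_type="text_embedding",
    openai_inference_id="openai-embeddings",
    service="openai",
    service_settings={
        "api_key": "<openai-api-key>",         # assumed setting name
        "model_id": "text-embedding-3-small",  # assumed model id
    },
)
print(resp)
```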

``_ :param task_type: The type of the inference task that the model will perform. NOTE: The `chat_completion` task type only supports streaming and only through the _stream API. :param openai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `openai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `openai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if openai_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'openai_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "openai_inference_id": _quote(openai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["openai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_openai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "service", "service_settings", "chunking_settings", "task_settings", ), ) def put_voyageai( self, *, task_type: t.Union[str, t.Literal["rerank", "text_embedding"]], voyageai_inference_id: str, service: t.Optional[t.Union[str, t.Literal["voyageai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, chunking_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a VoyageAI inference endpoint.

Create an inference endpoint to perform an inference task with the voyageai service.

Avoid creating multiple endpoints for the same model unless required, as each endpoint consumes significant resources.
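A minimal usage sketch (the endpoint id, model id, and setting names are assumptions based on the voyageai service docs):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a rerank endpoint backed by VoyageAI.
resp = client.inference.put_voyageai(
    task_type="rerank",
    voyageai_inference_id="voyageai-rerank",
    service="voyageai",
    service_settings={
        "model_id": "rerank-2",           # assumed model id
        "api_key": "<voyageai-api-key>",  # assumed setting name
    },
)
print(resp)
```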

``_ :param task_type: The type of the inference task that the model will perform. :param voyageai_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `voyageai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `voyageai` service. :param chunking_settings: The chunking configuration object. :param task_settings: Settings to configure the inference task. These settings are specific to the task type you specified. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if voyageai_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'voyageai_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "voyageai_inference_id": _quote(voyageai_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["voyageai_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if chunking_settings is not None: __body["chunking_settings"] = chunking_settings if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_voyageai", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("service", "service_settings"), ) def put_watsonx( self, *, task_type: t.Union[ str, t.Literal["chat_completion", "completion", "text_embedding"] ], watsonx_inference_id: str, service: t.Optional[t.Union[str, t.Literal["watsonxai"]]] = None, service_settings: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a Watsonx inference endpoint.

Create an inference endpoint to perform an inference task with the watsonxai service. You need an IBM Cloud Databases for Elasticsearch deployment to use the watsonxai inference service. You can provision one through the IBM catalog, the Cloud Databases CLI plug-in, the Cloud Databases API, or Terraform.
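A minimal usage sketch (the endpoint id, model id, API version, and setting names are assumptions based on the watsonxai service docs):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Create a text_embedding endpoint backed by Watsonx.ai.
resp = client.inference.put_watsonx(
    task_type="text_embedding",
    watsonx_inference_id="watsonx-embeddings",
    service="watsonxai",
    service_settings={
        # Setting names assumed from the watsonxai service docs.
        "api_key": "<watsonx-api-key>",
        "url": "<watsonx-deployment-url>",
        "model_id": "ibm/slate-30m-english-rtrvr",
        "project_id": "<watsonx-project-id>",
        "api_version": "2024-05-02",  # assumed version string
    },
)
print(resp)
```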

``_ :param task_type: The type of the inference task that the model will perform. :param watsonx_inference_id: The unique identifier of the inference endpoint. :param service: The type of service supported for the specified task type. In this case, `watsonxai`. :param service_settings: Settings used to install the inference model. These settings are specific to the `watsonxai` service. :param timeout: Specifies the amount of time to wait for the inference endpoint to be created. """ if task_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_type'") if watsonx_inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watsonx_inference_id'") if service is None and body is None: raise ValueError("Empty value passed for parameter 'service'") if service_settings is None and body is None: raise ValueError("Empty value passed for parameter 'service_settings'") __path_parts: t.Dict[str, str] = { "task_type": _quote(task_type), "watsonx_inference_id": _quote(watsonx_inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["watsonx_inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if service is not None: __body["service"] = service if service_settings is not None: __body["service_settings"] = service_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.put_watsonx", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "query", "task_settings"), ) def rerank( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, query: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform reranking inference on the service.
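A minimal sketch of a rerank request against an existing endpoint (the endpoint id and documents are hypothetical):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Score each input document against the query.
resp = client.inference.rerank(
    inference_id="my-rerank-endpoint",  # hypothetical endpoint id
    query="What is Elasticsearch?",
    input=[
        "Elasticsearch is a distributed search and analytics engine.",
        "Paris is the capital of France.",
    ],
)
print(resp)
```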

``_ :param inference_id: The unique identifier for the inference endpoint. :param input: The text on which you want to perform the inference task. It can be a single string or an array. > info > Inference endpoints for the `completion` task type currently only support a single string as input. :param query: Query input. :param task_settings: Task settings for the individual inference request. These settings are specific to the task type you specified and override the task settings specified when initializing the service. :param timeout: The amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/rerank/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if query is not None: __body["query"] = query if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.rerank", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "task_settings"), ) def sparse_embedding( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform sparse embedding inference on the service.
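A minimal sketch against an existing sparse-embedding endpoint (the endpoint id is hypothetical):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# A single string or a list of strings may be passed as input.
resp = client.inference.sparse_embedding(
    inference_id="my-elser-endpoint",  # hypothetical endpoint id
    input="The quick brown fox jumps over the lazy dog",
)
print(resp)
```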

``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. :param task_settings: Optional task settings :param timeout: Specifies the amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/sparse_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.sparse_embedding", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("input", "task_settings"), ) def text_embedding( self, *, inference_id: str, input: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, task_settings: t.Optional[t.Any] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Perform text embedding inference on the service.
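A minimal sketch against an existing text-embedding endpoint (the endpoint id is hypothetical):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Embed several inputs in one request.
resp = client.inference.text_embedding(
    inference_id="my-embedding-endpoint",  # hypothetical endpoint id
    input=["first sentence", "second sentence"],
)
print(resp)
```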

``_ :param inference_id: The inference Id :param input: Inference input. Either a string or an array of strings. :param task_settings: Optional task settings :param timeout: Specifies the amount of time to wait for the inference request to complete. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if input is None and body is None: raise ValueError("Empty value passed for parameter 'input'") __path_parts: t.Dict[str, str] = {"inference_id": _quote(inference_id)} __path = f'/_inference/text_embedding/{__path_parts["inference_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if input is not None: __body["input"] = input if task_settings is not None: __body["task_settings"] = task_settings if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.text_embedding", path_parts=__path_parts, ) @_rewrite_parameters( body_name="inference_config", ) def update( self, *, inference_id: str, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, task_type: t.Optional[ t.Union[ str, t.Literal[ "chat_completion", "completion", "rerank", "sparse_embedding", "text_embedding", ], ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update an inference endpoint.

Modify task_settings, secrets (within service_settings), or num_allocations for an inference endpoint, depending on the specific endpoint service and task_type.

IMPORTANT: The inference APIs enable you to use certain services, such as built-in machine learning models (ELSER, E5), models uploaded through Eland, Cohere, OpenAI, Azure, Google AI Studio, Google Vertex AI, Anthropic, Watsonx.ai, or Hugging Face. For built-in models and models uploaded through Eland, the inference APIs offer an alternative way to use and manage trained models. However, if you do not plan to use the inference APIs to use these models or if you want to use non-NLP models, use the machine learning trained model APIs.
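A minimal sketch of rotating a secret on an existing endpoint; the endpoint id and the `service_settings` layout shown are assumptions drawn from the update docs, not values from this file:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# Replace the stored API key without recreating the endpoint.
resp = client.inference.update(
    inference_id="openai-embeddings",  # hypothetical endpoint id
    inference_config={
        "service_settings": {"api_key": "<new-openai-api-key>"},
    },
)
print(resp)
```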

``_ :param inference_id: The unique identifier of the inference endpoint. :param inference_config: :param task_type: The type of inference task that the model performs. """ if inference_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'inference_id'") if inference_config is None and body is None: raise ValueError( "Empty value passed for parameters 'inference_config' and 'body', one of them should be set." ) elif inference_config is not None and body is not None: raise ValueError("Cannot set both 'inference_config' and 'body'") __path_parts: t.Dict[str, str] if task_type not in SKIP_IN_PATH and inference_id not in SKIP_IN_PATH: __path_parts = { "task_type": _quote(task_type), "inference_id": _quote(inference_id), } __path = f'/_inference/{__path_parts["task_type"]}/{__path_parts["inference_id"]}/_update' elif inference_id not in SKIP_IN_PATH: __path_parts = {"inference_id": _quote(inference_id)} __path = f'/_inference/{__path_parts["inference_id"]}/_update' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = inference_config if inference_config is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="inference.update", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/ingest.py000066400000000000000000000761511506101734100247310ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class IngestClient(NamespacedClient): @_rewrite_parameters() def delete_geoip_database( self, *, id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete GeoIP database configurations.

Delete one or more IP geolocation database configurations.

``_ :param id: A comma-separated list of geoip database configurations to delete :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/geoip/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ingest.delete_geoip_database", path_parts=__path_parts, ) @_rewrite_parameters() def delete_ip_location_database( self, *, id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete IP geolocation database configurations.

``_ :param id: A comma-separated list of IP location database configurations. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ingest.delete_ip_location_database", path_parts=__path_parts, ) @_rewrite_parameters() def delete_pipeline( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete pipelines. Delete one or more ingest pipelines.

``_ :param id: Pipeline ID or wildcard expression of pipeline IDs used to limit the request. To delete all ingest pipelines in a cluster, use a value of `*`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ingest.delete_pipeline", path_parts=__path_parts, ) @_rewrite_parameters() def geo_ip_stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get GeoIP statistics. Get download statistics for GeoIP2 databases that are used with the GeoIP processor.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/geoip/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.geo_ip_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_geoip_database( self, *, id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get GeoIP database configurations.

Get information about one or more IP geolocation database configurations.

``_ :param id: A comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/geoip/database/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ingest/geoip/database" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.get_geoip_database", path_parts=__path_parts, ) @_rewrite_parameters() def get_ip_location_database( self, *, id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get IP geolocation database configurations.

``_ :param id: Comma-separated list of database configuration IDs to retrieve. Wildcard (`*`) expressions are supported. To get all database configurations, omit this parameter or use `*`. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ingest/ip_location/database" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.get_ip_location_database", path_parts=__path_parts, ) @_rewrite_parameters() def get_pipeline( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, summary: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get pipelines.

Get information about one or more ingest pipelines. This API returns a local reference of the pipeline.

``_ :param id: Comma-separated list of pipeline IDs to retrieve. Wildcard (`*`) expressions are supported. To get all ingest pipelines, omit this parameter or use `*`. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param summary: Return pipelines without their definitions (default: false) """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ingest/pipeline" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if summary is not None: __query["summary"] = summary __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.get_pipeline", path_parts=__path_parts, ) @_rewrite_parameters() def processor_grok( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a grok processor. Extract structured fields out of a single text field within a document. You must choose which field to extract matched fields from, as well as the grok pattern you expect will match. A grok pattern is like a regular expression that supports aliased expressions that can be reused.
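A minimal sketch of listing the built-in grok patterns; the `"patterns"` response key is an assumption about the response shape:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = client.ingest.processor_grok()
# Expected to map pattern names to regular expressions (assumed shape).
print(sorted(resp["patterns"])[:5])
```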

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ingest/processor/grok" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ingest.processor_grok", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("maxmind", "name"), ) def put_geoip_database( self, *, id: str, maxmind: t.Optional[t.Mapping[str, t.Any]] = None, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a GeoIP database configuration.

Refer to the create or update IP geolocation database configuration API.

``_ :param id: ID of the database configuration to create or update. :param maxmind: The configuration necessary to identify which IP geolocation provider to use to download the database, as well as any provider-specific configuration necessary for such downloading. At present, the only supported provider is maxmind, and the maxmind provider requires that an account_id (string) is configured. :param name: The provider-assigned name of the IP geolocation database to download. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if maxmind is None and body is None: raise ValueError("Empty value passed for parameter 'maxmind'") if name is None and body is None: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/geoip/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if maxmind is not None: __body["maxmind"] = maxmind if name is not None: __body["name"] = name __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.put_geoip_database", path_parts=__path_parts, ) @_rewrite_parameters( body_name="configuration", ) def put_ip_location_database( self, *, id: str, configuration: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update an IP geolocation database configuration.

``_ :param id: The database configuration identifier. :param configuration: :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. A value of `-1` indicates that the request should never time out. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response indicates that it was not completely acknowledged. A value of `-1` indicates that the request should never time out. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if configuration is None and body is None: raise ValueError( "Empty value passed for parameters 'configuration' and 'body', one of them should be set." ) elif configuration is not None and body is not None: raise ValueError("Cannot set both 'configuration' and 'body'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/ip_location/database/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __body = configuration if configuration is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.put_ip_location_database", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "deprecated", "description", "meta", "on_failure", "processors", "version", ), parameter_aliases={"_meta": "meta"}, ) def put_pipeline( self, *, id: str, deprecated: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_version: t.Optional[int] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, on_failure: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, pretty: t.Optional[bool] = None, processors: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a pipeline. Changes made using this API take effect immediately.
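A minimal sketch of creating a two-processor pipeline (the pipeline id is hypothetical; set and lowercase are standard ingest processors):

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

resp = client.ingest.put_pipeline(
    id="my-pipeline",  # hypothetical pipeline id
    description="Tag incoming documents and normalize the message field",
    processors=[
        {"set": {"field": "env", "value": "production"}},
        {"lowercase": {"field": "message"}},
    ],
)
print(resp)
```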

``_ :param id: ID of the ingest pipeline to create or update. :param deprecated: Marks this ingest pipeline as deprecated. When a deprecated ingest pipeline is referenced as the default or final pipeline when creating or updating a non-deprecated index template, Elasticsearch will emit a deprecation warning. :param description: Description of the ingest pipeline. :param if_version: Required version for optimistic concurrency control for pipeline updates :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param meta: Optional metadata about the ingest pipeline. May have any contents. This map is not automatically generated by Elasticsearch. :param on_failure: Processors to run immediately after a processor failure. Each processor supports a processor-level `on_failure` value. If a processor without an `on_failure` value fails, Elasticsearch uses this pipeline-level parameter as a fallback. The processors in this parameter run sequentially in the order specified. Elasticsearch will not attempt to run the pipeline's remaining processors. :param processors: Processors used to perform transformations on documents before indexing. Processors run sequentially in the order specified. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param version: Version number used by external systems to track ingest pipelines. This parameter is intended for external systems only. Elasticsearch does not use or validate pipeline version numbers. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_version is not None: __query["if_version"] = if_version if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if deprecated is not None: __body["deprecated"] = deprecated if description is not None: __body["description"] = description if meta is not None: __body["_meta"] = meta if on_failure is not None: __body["on_failure"] = on_failure if processors is not None: __body["processors"] = processors if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.put_pipeline", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "pipeline"), ) def simulate( self, *, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pipeline: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, verbose: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate a pipeline.

Run an ingest pipeline against a set of provided documents. You can either specify an existing pipeline to use with the provided documents or supply a pipeline definition in the body of the request.
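A minimal sketch of simulating an inline pipeline definition against a sample document:

```python
from elasticsearch import Elasticsearch

client = Elasticsearch("http://localhost:9200")  # assumed local cluster

# No pipeline id: the definition is supplied in the request body instead.
resp = client.ingest.simulate(
    pipeline={"processors": [{"lowercase": {"field": "message"}}]},
    docs=[{"_source": {"message": "HELLO World"}}],
    verbose=True,  # include per-processor output
)
print(resp)
```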

``_ :param docs: Sample documents to test in the pipeline. :param id: The pipeline to test. If you don't specify a `pipeline` in the request body, this parameter is required. :param pipeline: The pipeline to test. If you don't specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. :param verbose: If `true`, the response includes output data for each processor in the executed pipeline. """ if docs is None and body is None: raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ingest/pipeline/{__path_parts["id"]}/_simulate' else: __path_parts = {} __path = "/_ingest/pipeline/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if verbose is not None: __query["verbose"] = verbose if not __body: if docs is not None: __body["docs"] = docs if pipeline is not None: __body["pipeline"] = pipeline __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ingest.simulate", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/license.py000066400000000000000000000403721506101734100250560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class LicenseClient(NamespacedClient): @_rewrite_parameters() def delete( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete the license.

When the license expires, your subscription level reverts to Basic.

If the operator privileges feature is enabled, only operator users can use this API.

``_ :param master_timeout: The period to wait for a connection to the master node. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="license.delete", path_parts=__path_parts, ) @_rewrite_parameters() def get( self, *, accept_enterprise: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get license information.

Get information about your Elastic license including its type, its status, when it was issued, and when it expires.

NOTE: If the master node is generating a new cluster state, the get license API may return a 404 Not Found response. If you receive an unexpected 404 response after cluster startup, wait a short period and retry the request.
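Example (a minimal sketch; ``client`` is assumed to be an ``Elasticsearch`` instance, and the response keys shown are taken from the documented response body)::

    resp = client.license.get()
    print(resp["license"]["type"], resp["license"]["status"])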

``_ :param accept_enterprise: If `true`, this parameter returns enterprise for Enterprise license types. If `false`, this parameter returns platinum for both platinum and enterprise license types. This behavior is maintained for backwards compatibility. This parameter is deprecated and will always be set to true in 8.x. :param local: Specifies whether to retrieve local information. The default value is `false`, which means the information is retrieved from the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" __query: t.Dict[str, t.Any] = {} if accept_enterprise is not None: __query["accept_enterprise"] = accept_enterprise if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="license.get", path_parts=__path_parts, ) @_rewrite_parameters() def get_basic_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the basic license status.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/basic_status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="license.get_basic_status", path_parts=__path_parts, ) @_rewrite_parameters() def get_trial_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the trial status.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_license/trial_status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="license.get_trial_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("license", "licenses"), ) def post( self, *, acknowledge: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, license: t.Optional[t.Mapping[str, t.Any]] = None, licenses: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update the license.

You can update your license at runtime without shutting down your nodes. License updates take effect immediately. If the license you are installing does not support all of the features that were available with your previous license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

NOTE: If Elasticsearch security features are enabled and you are installing a gold or higher license, you must enable TLS on the transport networking layer before you install the license. If the operator privileges feature is enabled, only operator users can use this API.
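A sketch of the acknowledge flow described above (``client`` and ``my_license_dict`` are illustrative; a real license document is issued by Elastic)::

    resp = client.license.post(license=my_license_dict)
    if not resp.get("acknowledged", True):
        # Re-submit, explicitly acknowledging the feature changes.
        resp = client.license.post(license=my_license_dict, acknowledge=True)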

``_ :param acknowledge: Specifies whether you acknowledge the license changes. :param license: :param licenses: A sequence of one or more JSON documents containing the license information. :param master_timeout: The period to wait for a connection to the master node. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if acknowledge is not None: __query["acknowledge"] = acknowledge if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if license is not None: __body["license"] = license if licenses is not None: __body["licenses"] = licenses if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="license.post", path_parts=__path_parts, ) @_rewrite_parameters() def post_start_basic( self, *, acknowledge: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a basic license.

Start an indefinite basic license, which gives access to all the basic features.

NOTE: In order to start a basic license, you must not currently have a basic license.

If the basic license does not support all of the features that are available with your current license, however, you are notified in the response. You must then re-submit the API request with the acknowledge parameter set to true.

To check the status of your basic license, use the get basic license API.
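For example, acknowledging up front as described above (``client`` is assumed to be an ``Elasticsearch`` instance)::

    resp = client.license.post_start_basic(acknowledge=True)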

``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) :param master_timeout: Period to wait for a connection to the master node. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_basic" __query: t.Dict[str, t.Any] = {} if acknowledge is not None: __query["acknowledge"] = acknowledge if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="license.post_start_basic", path_parts=__path_parts, ) @_rewrite_parameters() def post_start_trial( self, *, acknowledge: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, type: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a trial. Start a 30-day trial, which gives access to all subscription features.

NOTE: You are allowed to start a trial only if your cluster has not already activated a trial for the current major product version. For example, if you have already activated a trial for v8.0, you cannot start a new trial until v9.0. You can, however, request an extended trial at https://www.elastic.co/trialextension.

To check the status of your trial, use the get trial status API.
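For example (a minimal sketch; ``client`` is assumed to be an ``Elasticsearch`` instance)::

    resp = client.license.post_start_trial(acknowledge=True)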

``_ :param acknowledge: whether the user has acknowledged acknowledge messages (default: false) :param master_timeout: Period to wait for a connection to the master node. :param type: The type of trial license to generate (default: "trial") """ __path_parts: t.Dict[str, str] = {} __path = "/_license/start_trial" __query: t.Dict[str, t.Any] = {} if acknowledge is not None: __query["acknowledge"] = acknowledge if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if type is not None: __query["type"] = type __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="license.post_start_trial", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/logstash.py000066400000000000000000000145421506101734100252600ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class LogstashClient(NamespacedClient): @_rewrite_parameters() def delete_pipeline( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a Logstash pipeline. Delete a pipeline that is used for Logstash Central Management. If the request succeeds, you receive an empty response with an appropriate status code.

``_ :param id: An identifier for the pipeline. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_logstash/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="logstash.delete_pipeline", path_parts=__path_parts, ) @_rewrite_parameters() def get_pipeline( self, *, id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get Logstash pipelines. Get pipelines that are used for Logstash Central Management.

``_ :param id: A comma-separated list of pipeline identifiers. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_logstash/pipeline/{__path_parts["id"]}' else: __path_parts = {} __path = "/_logstash/pipeline" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="logstash.get_pipeline", path_parts=__path_parts, ) @_rewrite_parameters( body_name="pipeline", ) def put_pipeline( self, *, id: str, pipeline: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a Logstash pipeline.

Create a pipeline that is used for Logstash Central Management. If the specified pipeline exists, it is replaced.
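A minimal sketch (the id and every body field are illustrative; the body is assumed to follow the Logstash central management pipeline document)::

    client.logstash.put_pipeline(
        id="my-pipeline",  # illustrative pipeline id
        pipeline={
            "description": "Sample pipeline",  # illustrative values throughout
            "last_modified": "2024-01-01T00:00:00.000Z",
            "pipeline": "input {} filter {} output {}",
            "pipeline_metadata": {"type": "logstash_pipeline", "version": 1},
            "pipeline_settings": {"pipeline.batch.size": 125},
            "username": "elastic",
        },
    )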

``_ :param id: An identifier for the pipeline. :param pipeline: """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if pipeline is None and body is None: raise ValueError( "Empty value passed for parameters 'pipeline' and 'body', one of them should be set." ) elif pipeline is not None and body is not None: raise ValueError("Cannot set both 'pipeline' and 'body'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_logstash/pipeline/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = pipeline if pipeline is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="logstash.put_pipeline", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/migration.py000066400000000000000000000143461506101734100254270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class MigrationClient(NamespacedClient): @_rewrite_parameters() def deprecations( self, *, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get deprecation information. Get information about different cluster, node, and index level settings that use deprecated features that will be removed or changed in the next major version.

TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
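For example, to check only a subset of indices (the index pattern is illustrative)::

    resp = client.migration.deprecations(index="logs-*")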

``_ :param index: Comma-separated list of data streams or indices to check. Wildcard (*) expressions are supported. """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_migration/deprecations' else: __path_parts = {} __path = "/_migration/deprecations" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="migration.deprecations", path_parts=__path_parts, ) @_rewrite_parameters() def get_feature_upgrade_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get feature migration information. Version upgrades sometimes require changes to how features store configuration information and data in system indices. Check which features need to be migrated and the status of any migrations that are in progress.

TIP: This API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="migration.get_feature_upgrade_status", path_parts=__path_parts, ) @_rewrite_parameters() def post_feature_upgrade( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start the feature migration. Version upgrades sometimes require changes to how features store configuration information and data in system indices. This API starts the automatic migration process.

Some functionality might be temporarily unavailable during the migration process.

TIP: The API is designed for indirect use by the Upgrade Assistant. We strongly recommend you use the Upgrade Assistant.
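A sketch of a check-then-migrate flow (``client`` is assumed to be an ``Elasticsearch`` instance; the ``migration_status`` key and its value follow the documented response but should be treated as assumptions here)::

    status = client.migration.get_feature_upgrade_status()
    if status["migration_status"] == "MIGRATION_NEEDED":  # assumed status value
        client.migration.post_feature_upgrade()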

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_migration/system_features" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="migration.post_feature_upgrade", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/ml.py000066400000000000000000010101761506101734100240440ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class MlClient(NamespacedClient): @_rewrite_parameters() def clear_trained_model_deployment_cache( self, *, model_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear trained model deployment cache.

The cache will be cleared on all nodes where the trained model is assigned. A trained model deployment may have an inference cache enabled. As requests are handled by each allocated node, their responses may be cached on that individual node. Calling this API clears the caches without restarting the deployment.
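For example (the model id is illustrative)::

    client.ml.clear_trained_model_deployment_cache(model_id="my-model")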

``_ :param model_id: The unique identifier of the trained model. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = ( f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/cache/_clear' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.clear_trained_model_deployment_cache", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("allow_no_match", "force", "timeout"), ) def close_job( self, *, job_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Close anomaly detection jobs.

A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. When you close a job, it runs housekeeping tasks such as pruning the model history, flushing buffers, calculating final results and persisting the model snapshots. Depending upon the size of the job, it could take several minutes to close and the equivalent time to re-open. After it is closed, the job has a minimal overhead on the cluster except for maintaining its metadata. Therefore, it is a best practice to close jobs that are no longer required to process data. If you close an anomaly detection job whose datafeed is running, the request first tries to stop the datafeed. This behavior is equivalent to calling the stop datafeed API with the same timeout and force parameters as the close job request. When a datafeed that has a specified end date stops, it automatically closes its associated job.
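For example, closing one job and waiting up to 30 seconds (the job id and timeout are illustrative)::

    client.ml.close_job(job_id="my-job", timeout="30s")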

``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. You can close multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can close all jobs by using `_all` or by specifying `*` as the job identifier. :param allow_no_match: Refer to the description for the `allow_no_match` query parameter. :param force: Refer to the description for the `force` query parameter. :param timeout: Refer to the description for the `timeout` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_close' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_no_match is not None: __body["allow_no_match"] = allow_no_match if force is not None: __body["force"] = force if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.close_job", path_parts=__path_parts, ) @_rewrite_parameters() def delete_calendar( self, *, calendar_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a calendar.

Remove all scheduled events from a calendar, then delete it.

``_ :param calendar_id: A string that uniquely identifies a calendar. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_calendar", path_parts=__path_parts, ) @_rewrite_parameters() def delete_calendar_event( self, *, calendar_id: str, event_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete events from a calendar.

``_ :param calendar_id: A string that uniquely identifies a calendar. :param event_id: Identifier for the scheduled event. You can obtain this identifier by using the get calendar events API. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if event_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'event_id'") __path_parts: t.Dict[str, str] = { "calendar_id": _quote(calendar_id), "event_id": _quote(event_id), } __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/events/{__path_parts["event_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_calendar_event", path_parts=__path_parts, ) @_rewrite_parameters() def delete_calendar_job( self, *, calendar_id: str, job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete anomaly detection jobs from a calendar.

``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = { "calendar_id": _quote(calendar_id), "job_id": _quote(job_id), } __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/jobs/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_calendar_job", path_parts=__path_parts, ) @_rewrite_parameters() def delete_data_frame_analytics( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a data frame analytics job.

``_ :param id: Identifier for the data frame analytics job. :param force: If `true`, it deletes a job that is not stopped; this method is quicker than stopping and deleting the job. :param timeout: The time to wait for the job to be deleted. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters() def delete_datafeed( self, *, datafeed_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a datafeed.

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param force: Use to forcefully delete a started datafeed; this method is quicker than stopping and deleting the datafeed. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("requests_per_second", "timeout"), ) def delete_expired_data( self, *, job_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, requests_per_second: t.Optional[float] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete expired ML data.

Delete all job results, model snapshots, and forecast data that have exceeded their retention period. Machine learning state documents that are not associated with any job are also deleted. You can limit the request to a single anomaly detection job or a set of jobs by using a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. You can delete expired data for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
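For example, a throttled deletion scoped to one job (all values are illustrative)::

    client.ml.delete_expired_data(
        job_id="my-job",  # illustrative job id
        requests_per_second=100.0,
        timeout="1h",
    )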

``_ :param job_id: Identifier for an anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. :param requests_per_second: The desired requests per second for the deletion processes. The default behavior is no throttling. :param timeout: How long can the underlying delete processes run until they are canceled. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/_delete_expired_data/{__path_parts["job_id"]}' else: __path_parts = {} __path = "/_ml/_delete_expired_data" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if requests_per_second is not None: __body["requests_per_second"] = requests_per_second if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.delete_expired_data", path_parts=__path_parts, ) @_rewrite_parameters() def delete_filter( self, *, filter_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a filter.

If an anomaly detection job references the filter, you cannot delete the filter. You must update or delete the job before you can delete the filter.

``_ :param filter_id: A string that uniquely identifies a filter. """ if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'filter_id'") __path_parts: t.Dict[str, str] = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_filter", path_parts=__path_parts, ) @_rewrite_parameters() def delete_forecast( self, *, job_id: str, forecast_id: t.Optional[str] = None, allow_no_forecasts: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete forecasts from a job.

By default, forecasts are retained for 14 days. You can specify a different retention period with the expires_in parameter in the forecast jobs API. The delete forecast API enables you to delete one or more forecasts before they expire.
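For example, deleting every forecast for one job (the job id is illustrative)::

    client.ml.delete_forecast(job_id="my-job", forecast_id="_all")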

``_ :param job_id: Identifier for the anomaly detection job. :param forecast_id: A comma-separated list of forecast identifiers. If you do not specify this optional parameter or if you specify `_all` or `*` the API deletes all forecasts from the job. :param allow_no_forecasts: Specifies whether an error occurs when there are no forecasts. In particular, if this parameter is set to `false` and there are no forecasts associated with the job, attempts to delete all forecasts return an error. :param timeout: Specifies the period of time to wait for the completion of the delete operation. When this period of time elapses, the API fails and returns an error. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and forecast_id not in SKIP_IN_PATH: __path_parts = { "job_id": _quote(job_id), "forecast_id": _quote(forecast_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_forecast/{__path_parts["forecast_id"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_forecast' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if allow_no_forecasts is not None: __query["allow_no_forecasts"] = allow_no_forecasts if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_forecast", path_parts=__path_parts, ) @_rewrite_parameters() def delete_job( self, *, job_id: str, delete_user_annotations: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an anomaly detection job.

All job configuration, model state, and results are deleted. It is not currently possible to delete multiple jobs using wildcards or a comma-separated list. If you delete a job that has a datafeed, the request first tries to delete the datafeed. This behavior is equivalent to calling the delete datafeed API with the same timeout and force parameters as the delete job request.
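For example, force-deleting a job without waiting for completion (the job id is illustrative)::

    client.ml.delete_job(job_id="my-job", force=True, wait_for_completion=False)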

``_ :param job_id: Identifier for the anomaly detection job. :param delete_user_annotations: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. :param force: Use to forcefully delete an opened job; this method is quicker than closing and deleting the job. :param wait_for_completion: Specifies whether the request should return immediately or wait until the job deletion completes. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} if delete_user_annotations is not None: __query["delete_user_annotations"] = delete_user_annotations if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_job", path_parts=__path_parts, ) @_rewrite_parameters() def delete_model_snapshot( self, *, job_id: str, snapshot_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a model snapshot.

You cannot delete the active model snapshot. To delete that snapshot, first revert to a different one. To identify the active model snapshot, refer to the model_snapshot_id in the results from the get jobs API.

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_model_snapshot", path_parts=__path_parts, ) @_rewrite_parameters() def delete_trained_model( self, *, model_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an unreferenced trained model.

The request deletes a trained inference model that is not referenced by an ingest pipeline.

``_ :param model_id: The unique identifier of the trained model. :param force: Forcefully deletes a trained model that is referenced by ingest pipelines or has a started deployment. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_trained_model", path_parts=__path_parts, ) @_rewrite_parameters() def delete_trained_model_alias( self, *, model_id: str, model_alias: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a trained model alias.

This API deletes an existing model alias that refers to a trained model. If the model alias is missing or refers to a model other than the one identified by the model_id, this API returns an error.

``_ :param model_id: The trained model ID to which the model alias refers. :param model_alias: The model alias to delete. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if model_alias in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_alias'") __path_parts: t.Dict[str, str] = { "model_id": _quote(model_id), "model_alias": _quote(model_alias), } __path = f'/_ml/trained_models/{__path_parts["model_id"]}/model_aliases/{__path_parts["model_alias"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="ml.delete_trained_model_alias", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis_config", "max_bucket_cardinality", "overall_cardinality", ), ) def estimate_model_memory( self, *, analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_bucket_cardinality: t.Optional[t.Mapping[str, int]] = None, overall_cardinality: t.Optional[t.Mapping[str, int]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Estimate job model memory usage.

Estimate the memory usage for an anomaly detection job model. The estimate is based on analysis configuration details for the job and cardinality estimates for the fields it references.
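A minimal sketch mirroring the parameters described above (the detector, field names, and cardinality are illustrative)::

    resp = client.ml.estimate_model_memory(
        analysis_config={
            "bucket_span": "15m",
            "detectors": [
                {
                    "function": "mean",
                    "field_name": "responsetime",  # illustrative field
                    "by_field_name": "airline",
                }
            ],
            "influencers": ["airline"],
        },
        overall_cardinality={"airline": 50},  # illustrative cardinality
    )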

``_ :param analysis_config: For a list of the properties that you can specify in the `analysis_config` component of the body of this API. :param max_bucket_cardinality: Estimates of the highest cardinality in a single bucket that is observed for influencer fields over the time period that the job analyzes data. To produce a good answer, values must be provided for all influencer fields. Providing values for fields that are not listed as `influencers` has no effect on the estimation. :param overall_cardinality: Estimates of the cardinality that is observed for fields over the whole time period that the job analyzes data. To produce a good answer, values must be provided for fields referenced in the `by_field_name`, `over_field_name` and `partition_field_name` of any detectors. Providing values for other fields has no effect on the estimation. It can be omitted from the request if no detectors have a `by_field_name`, `over_field_name` or `partition_field_name`. """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/anomaly_detectors/_estimate_model_memory" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analysis_config is not None: __body["analysis_config"] = analysis_config if max_bucket_cardinality is not None: __body["max_bucket_cardinality"] = max_bucket_cardinality if overall_cardinality is not None: __body["overall_cardinality"] = overall_cardinality __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.estimate_model_memory", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("evaluation", "index", "query"), ) def evaluate_data_frame( self, *, evaluation: t.Optional[t.Mapping[str, t.Any]] = None, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Evaluate data frame analytics.

The API packages together commonly used evaluation metrics for various types of machine learning features. This has been designed for use on indices created by data frame analytics. Evaluation requires both a ground truth field and an analytics result field to be present.
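A sketch of a classification evaluation (the index and field names are illustrative)::

    resp = client.ml.evaluate_data_frame(
        index="my-dest-index",  # illustrative destination index
        evaluation={
            "classification": {
                "actual_field": "label",
                "predicted_field": "ml.label_prediction",
                "metrics": {"accuracy": {}},
            }
        },
    )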

``_ :param evaluation: Defines the type of evaluation you want to perform. :param index: Defines the `index` in which the evaluation will be performed. :param query: A query clause that retrieves a subset of data from the source index. """ if evaluation is None and body is None: raise ValueError("Empty value passed for parameter 'evaluation'") if index is None and body is None: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {} __path = "/_ml/data_frame/_evaluate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if evaluation is not None: __body["evaluation"] = evaluation if index is not None: __body["index"] = index if query is not None: __body["query"] = query __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.evaluate_data_frame", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_lazy_start", "analysis", "analyzed_fields", "description", "dest", "max_num_threads", "model_memory_limit", "source", ), ) def explain_data_frame_analytics( self, *, id: t.Optional[str] = None, allow_lazy_start: t.Optional[bool] = None, analysis: t.Optional[t.Mapping[str, t.Any]] = None, analyzed_fields: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Explain data frame analytics config.

This API provides explanations for a data frame analytics config that already exists or for one that has not been created yet. The following explanations are provided:

  • which fields are included in or excluded from the analysis, and why;
  • how much memory is estimated to be required. The estimate can be used when deciding the appropriate value for the model_memory_limit setting later on. If you have object fields or fields that are excluded via source filtering, they are not included in the explanation.
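A minimal sketch for a config that has not been created yet (the index name is illustrative)::

    resp = client.ml.explain_data_frame_analytics(
        source={"index": "my-source-index"},  # illustrative source index
        analysis={"outlier_detection": {}},
    )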
``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param allow_lazy_start: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. :param analysis: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. :param analyzed_fields: Specify includes and/or excludes patterns to select which fields will be included in the analysis. The patterns specified in excludes are applied last, therefore excludes takes precedence. In other words, if the same field is specified in both includes and excludes, then the field will not be included in the analysis. :param description: A description of the job. :param dest: The destination configuration, consisting of index and optionally results_field (ml by default). :param max_num_threads: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. :param source: The configuration of how to source the analysis data. It requires an index. Optionally, query and _source may be specified. 
""" __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_explain' else: __path_parts = {} __path = "/_ml/data_frame/analytics/_explain" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_lazy_start is not None: __body["allow_lazy_start"] = allow_lazy_start if analysis is not None: __body["analysis"] = analysis if analyzed_fields is not None: __body["analyzed_fields"] = analyzed_fields if description is not None: __body["description"] = description if dest is not None: __body["dest"] = dest if max_num_threads is not None: __body["max_num_threads"] = max_num_threads if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if source is not None: __body["source"] = source if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.explain_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("advance_time", "calc_interim", "end", "skip_time", "start"), ) def flush_job( self, *, job_id: str, advance_time: t.Optional[t.Union[str, t.Any]] = None, calc_interim: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, skip_time: t.Optional[t.Union[str, t.Any]] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Force buffered data to be processed. The flush jobs API is only applicable when sending data for analysis using the post data API. Depending on the content of the buffer, it might additionally calculate new results. The flush and close operations are similar; however, flushing is more efficient if you expect to send more data for analysis. When flushing, the job remains open and is available to continue analyzing data. A close operation additionally prunes and persists the model state to disk, and the job must be opened again before analyzing further data.
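A minimal usage sketch follows; ``client`` (a connected ``Elasticsearch`` instance) and the job id ``"my-job"`` are illustrative assumptions::

    resp = client.ml.flush_job(job_id="my-job", calc_interim=True)
    print(resp["flushed"])  # reports whether the buffered data was processed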

``_ :param job_id: Identifier for the anomaly detection job. :param advance_time: Refer to the description for the `advance_time` query parameter. :param calc_interim: Refer to the description for the `calc_interim` query parameter. :param end: Refer to the description for the `end` query parameter. :param skip_time: Refer to the description for the `skip_time` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_flush' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if advance_time is not None: __body["advance_time"] = advance_time if calc_interim is not None: __body["calc_interim"] = calc_interim if end is not None: __body["end"] = end if skip_time is not None: __body["skip_time"] = skip_time if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.flush_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("duration", "expires_in", "max_model_memory"), ) def forecast( self, *, job_id: str, duration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, error_trace: t.Optional[bool] = None, expires_in: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_model_memory: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Predict future behavior of a time series.

Forecasts predict future behavior based on historical data. They are not supported for jobs that perform population analysis; an error occurs if you try to create a forecast for a job that has an over_field_name in its configuration.
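For illustration, a hedged sketch; ``client`` and ``"my-job"`` are assumed names::

    # Forecast one day ahead and keep the forecast results for two weeks.
    resp = client.ml.forecast(job_id="my-job", duration="1d", expires_in="14d")
    print(resp["forecast_id"])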

``_ :param job_id: Identifier for the anomaly detection job. The job must be open when you create a forecast; otherwise, an error occurs. :param duration: Refer to the description for the `duration` query parameter. :param expires_in: Refer to the description for the `expires_in` query parameter. :param max_model_memory: Refer to the description for the `max_model_memory` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_forecast' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if duration is not None: __body["duration"] = duration if expires_in is not None: __body["expires_in"] = expires_in if max_model_memory is not None: __body["max_model_memory"] = max_model_memory if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.forecast", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "anomaly_score", "desc", "end", "exclude_interim", "expand", "page", "sort", "start", ), parameter_aliases={"from": "from_"}, ) def get_buckets( self, *, job_id: str, timestamp: t.Optional[t.Union[str, t.Any]] = None, anomaly_score: t.Optional[float] = None, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, expand: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job results for buckets. The API presents a chronological view of the records, grouped by bucket.
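A short sketch, assuming a connected ``client`` and an existing job ``"my-job"``::

    # Fetch up to ten buckets with an anomaly score of at least 75.
    resp = client.ml.get_buckets(job_id="my-job", anomaly_score=75.0, size=10)
    for bucket in resp["buckets"]:
        print(bucket["timestamp"], bucket["anomaly_score"])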

``_ :param job_id: Identifier for the anomaly detection job. :param timestamp: The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. :param anomaly_score: Refer to the description for the `anomaly_score` query parameter. :param desc: Refer to the description for the `desc` query parameter. :param end: Refer to the description for the `end` query parameter. :param exclude_interim: Refer to the description for the `exclude_interim` query parameter. :param expand: Refer to the description for the `expand` query parameter. :param from_: Skips the specified number of buckets. :param page: :param size: Specifies the maximum number of buckets to obtain. :param sort: Refer to the description for the `sort` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and timestamp not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id), "timestamp": _quote(timestamp)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/buckets/{__path_parts["timestamp"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/buckets' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if anomaly_score is not None: __body["anomaly_score"] = anomaly_score if desc is not None: __body["desc"] = desc if end is not None: __body["end"] = end if exclude_interim is not None: __body["exclude_interim"] = exclude_interim if expand is not None: __body["expand"] = expand if page is not None: __body["page"] = page if sort is not None: __body["sort"] = sort if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_buckets", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_calendar_events( self, *, calendar_id: str, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, job_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get info about events in calendars.
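A minimal sketch; the calendar id ``"planned-outages"`` is an assumed example::

    resp = client.ml.get_calendar_events(calendar_id="planned-outages", size=50)
    for event in resp["events"]:
        print(event["description"], event["start_time"], event["end_time"])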

``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. :param end: Specifies to get events with timestamps earlier than this time. :param from_: Skips the specified number of events. :param job_id: Specifies to get events for a specific anomaly detection job identifier or job group. It must be used with a calendar identifier of `_all` or `*`. :param size: Specifies the maximum number of events to obtain. :param start: Specifies to get events with timestamps after this time. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/events' __query: t.Dict[str, t.Any] = {} if end is not None: __query["end"] = end if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if job_id is not None: __query["job_id"] = job_id if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if start is not None: __query["start"] = start __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_calendar_events", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("page",), parameter_aliases={"from": "from_"}, ) def get_calendars( self, *, calendar_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get calendar configuration info.
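For illustration (the calendar id is an assumed example)::

    resp = client.ml.get_calendars(calendar_id="planned-outages")
    for calendar in resp["calendars"]:
        print(calendar["calendar_id"], calendar["job_ids"])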

``_ :param calendar_id: A string that uniquely identifies a calendar. You can get information for multiple calendars by using a comma-separated list of ids or a wildcard expression. You can get information for all calendars by using `_all` or `*` or by omitting the calendar identifier. :param from_: Skips the specified number of calendars. This parameter is supported only when you omit the calendar identifier. :param page: This object is supported only when you omit the calendar identifier. :param size: Specifies the maximum number of calendars to obtain. This parameter is supported only when you omit the calendar identifier. """ __path_parts: t.Dict[str, str] if calendar_id not in SKIP_IN_PATH: __path_parts = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}' else: __path_parts = {} __path = "/_ml/calendars" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if page is not None: __body["page"] = page if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_calendars", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("page",), parameter_aliases={"from": "from_"}, ) def get_categories( self, *, job_id: str, category_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, partition_field_value: t.Optional[str] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job results for categories.
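A hedged sketch, assuming ``client`` and a categorization job ``"my-job"``::

    resp = client.ml.get_categories(job_id="my-job", size=10)
    for category in resp["categories"]:
        print(category["category_id"], category["examples"])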

``_ :param job_id: Identifier for the anomaly detection job. :param category_id: Identifier for the category, which is unique in the job. If you specify neither the category ID nor the partition_field_value, the API returns information about all categories. If you specify only the partition_field_value, it returns information about all categories for the specified partition. :param from_: Skips the specified number of categories. :param page: Configures pagination. This parameter has the `from` and `size` properties. :param partition_field_value: Only return categories for the specified partition. :param size: Specifies the maximum number of categories to obtain. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and category_id not in SKIP_IN_PATH: __path_parts = { "job_id": _quote(job_id), "category_id": _quote(category_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/categories/{__path_parts["category_id"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = ( f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/categories' ) else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if partition_field_value is not None: __query["partition_field_value"] = partition_field_value if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if page is not None: __body["page"] = page if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_categories", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_data_frame_analytics( self, *, id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data frame analytics job configuration info. You can get information for multiple data frame analytics jobs in a single API request by using a comma-separated list of data frame analytics jobs or a wildcard expression.
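A minimal sketch; the identifiers are assumed examples::

    # A comma-separated list mixing an exact id and a wildcard expression.
    resp = client.ml.get_data_frame_analytics(id="job-1,job-2*")
    for job in resp["data_frame_analytics"]:
        print(job["id"])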

``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. :param from_: Skips the specified number of data frame analytics jobs. :param size: Specifies the maximum number of data frame analytics jobs to obtain. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}' else: __path_parts = {} __path = "/_ml/data_frame/analytics" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_data_frame_analytics_stats( self, *, id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get data frame analytics job stats.
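For illustration, assuming an existing job ``"my-dfa-job"``::

    resp = client.ml.get_data_frame_analytics_stats(id="my-dfa-job", verbose=True)
    print(resp["data_frame_analytics"][0]["state"])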

``_ :param id: Identifier for the data frame analytics job. If you do not specify this option, the API returns information for the first hundred data frame analytics jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a 404 status code when there are no matches or only partial matches. :param from_: Skips the specified number of data frame analytics jobs. :param size: Specifies the maximum number of data frame analytics jobs to obtain. :param verbose: Defines whether the stats response should be verbose. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_stats' else: __path_parts = {} __path = "/_ml/data_frame/analytics/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_data_frame_analytics_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_datafeed_stats( self, *, datafeed_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get datafeed stats. You can get statistics for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get statistics for all datafeeds by using _all, by specifying * as the <feed_id>, or by omitting the <feed_id>. If the datafeed is stopped, the only information you receive is the datafeed_id and the state. This API returns a maximum of 10,000 datafeeds.
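A short sketch; the wildcard expression is an assumed example::

    resp = client.ml.get_datafeed_stats(datafeed_id="datafeed-*")
    for stats in resp["datafeeds"]:
        print(stats["datafeed_id"], stats["state"])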

``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_stats' else: __path_parts = {} __path = "/_ml/datafeeds/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_datafeed_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_datafeeds( self, *, datafeed_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get datafeeds configuration info. You can get information for multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can get information for all datafeeds by using _all, by specifying * as the <feed_id>, or by omitting the <feed_id>. This API returns a maximum of 10,000 datafeeds.
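A minimal sketch using the ``_all`` form described above::

    resp = client.ml.get_datafeeds(datafeed_id="_all", exclude_generated=True)
    print(resp["count"])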

``_ :param datafeed_id: Identifier for the datafeed. It can be a datafeed identifier or a wildcard expression. If you do not specify one of these options, the API returns information about all datafeeds. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no datafeeds that match. 2. Contains the `_all` string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `datafeeds` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}' else: __path_parts = {} __path = "/_ml/datafeeds" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_datafeeds", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_filters( self, *, filter_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get filters. You can get a single filter or all filters.
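For illustration (the filter id is an assumed example)::

    resp = client.ml.get_filters(filter_id="safe-domains")
    print(resp["filters"][0]["items"])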

``_ :param filter_id: A string that uniquely identifies a filter. :param from_: Skips the specified number of filters. :param size: Specifies the maximum number of filters to obtain. """ __path_parts: t.Dict[str, str] if filter_id not in SKIP_IN_PATH: __path_parts = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}' else: __path_parts = {} __path = "/_ml/filters" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_filters", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("page",), parameter_aliases={"from": "from_"}, ) def get_influencers( self, *, job_id: str, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, influencer_score: t.Optional[float] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job results for influencers. Influencers are the entities that have contributed to, or are to blame for, the anomalies. Influencer results are available only if an influencer_field_name is specified in the job configuration.
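A hedged sketch, assuming ``client`` and a job configured with an influencer field::

    resp = client.ml.get_influencers(
        job_id="my-job", influencer_score=50.0, sort="influencer_score", desc=True
    )
    for influencer in resp["influencers"]:
        print(influencer["influencer_field_name"], influencer["influencer_score"])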

``_ :param job_id: Identifier for the anomaly detection job. :param desc: If true, the results are sorted in descending order. :param end: Returns influencers with timestamps earlier than this time. The default value means it is unset and results are not limited to specific timestamps. :param exclude_interim: If true, the output excludes interim results. By default, interim results are included. :param from_: Skips the specified number of influencers. :param influencer_score: Returns influencers with anomaly scores greater than or equal to this value. :param page: Configures pagination. This parameter has the `from` and `size` properties. :param size: Specifies the maximum number of influencers to obtain. :param sort: Specifies the sort field for the requested influencers. By default, the influencers are sorted by the `influencer_score` value. :param start: Returns influencers with timestamps after this time. The default value means it is unset and results are not limited to specific timestamps. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/influencers' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if desc is not None: __query["desc"] = desc if end is not None: __query["end"] = end if error_trace is not None: __query["error_trace"] = error_trace if exclude_interim is not None: __query["exclude_interim"] = exclude_interim if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if influencer_score is not None: __query["influencer_score"] = influencer_score if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if sort is not None: __query["sort"] = sort if start is not None: __query["start"] = start if not __body: if page is not None: __body["page"] = page if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_influencers", path_parts=__path_parts, ) @_rewrite_parameters() def get_job_stats( self, *, job_id: t.Optional[str] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job stats.
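A minimal sketch; the group name is an assumed example::

    resp = client.ml.get_job_stats(job_id="my-group")
    for stats in resp["jobs"]:
        print(stats["job_id"], stats["state"])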

``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If `true`, the API returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If `false`, the API returns a `404` status code when there are no matches or only partial matches. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_stats' else: __path_parts = {} __path = "/_ml/anomaly_detectors/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_job_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_jobs( self, *, job_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection jobs configuration info. You can get information for multiple anomaly detection jobs in a single API request by using a group name, a comma-separated list of jobs, or a wildcard expression. You can get information for all anomaly detection jobs by using _all, by specifying * as the <job_id>, or by omitting the <job_id>.
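For illustration, using a group name plus a wildcard expression (assumed names)::

    resp = client.ml.get_jobs(job_id="my-group,other-job*")
    for job in resp["jobs"]:
        print(job["job_id"])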

``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, or a wildcard expression. If you do not specify one of these options, the API returns information for all anomaly detection jobs. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is `true`, which returns an empty `jobs` array when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. """ __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' else: __path_parts = {} __path = "/_ml/anomaly_detectors" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_jobs", path_parts=__path_parts, ) @_rewrite_parameters() def get_memory_stats( self, *, node_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get machine learning memory usage info. Get information about how machine learning jobs and trained models are using memory, on each node, both within the JVM heap, and natively, outside of the JVM.
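A short sketch; the ``ml:true`` selector (from the ``node_id`` examples below) targets machine learning nodes::

    resp = client.ml.get_memory_stats(node_id="ml:true", timeout="30s")
    print(resp["cluster_name"])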

``_ :param node_id: The names of particular nodes in the cluster to target. For example, `nodeId1,nodeId2` or `ml:true` :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_ml/memory/{__path_parts["node_id"]}/_stats' else: __path_parts = {} __path = "/_ml/memory/_stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_memory_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_model_snapshot_upgrade_stats( self, *, job_id: str, snapshot_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly detection job model snapshot upgrade usage info.
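A hedged sketch, assuming ``client`` and an existing job ``"my-job"``::

    resp = client.ml.get_model_snapshot_upgrade_stats(job_id="my-job", snapshot_id="_all")
    print(resp["count"])  # number of snapshot upgrades found (assumed response shape)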

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no jobs that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty jobs array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_upgrade/_stats' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_model_snapshot_upgrade_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("desc", "end", "page", "sort", "start"), parameter_aliases={"from": "from_"}, ) def get_model_snapshots( self, *, job_id: str, snapshot_id: t.Optional[str] = None, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get model snapshots info.
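For illustration, fetch the five most recent snapshots (the job name is assumed)::

    resp = client.ml.get_model_snapshots(
        job_id="my-job", sort="timestamp", desc=True, size=5
    )
    for snapshot in resp["model_snapshots"]:
        print(snapshot["snapshot_id"], snapshot["timestamp"])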

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the model snapshot. You can get information for multiple snapshots by using a comma-separated list or a wildcard expression. You can get all snapshots by using `_all`, by specifying `*` as the snapshot ID, or by omitting the snapshot ID. :param desc: Refer to the description for the `desc` query parameter. :param end: Refer to the description for the `end` query parameter. :param from_: Skips the specified number of snapshots. :param page: :param size: Specifies the maximum number of snapshots to obtain. :param sort: Refer to the description for the `sort` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] if job_id not in SKIP_IN_PATH and snapshot_id not in SKIP_IN_PATH: __path_parts = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}' elif job_id not in SKIP_IN_PATH: __path_parts = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if desc is not None: __body["desc"] = desc if end is not None: __body["end"] = end if page is not None: __body["page"] = page if sort is not None: __body["sort"] = sort if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_model_snapshots", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_no_match", "bucket_span", "end", "exclude_interim", "overall_score", "start", "top_n", ), ) def get_overall_buckets( self, *, job_id: str, allow_no_match: t.Optional[bool] = None, bucket_span: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, overall_score: t.Optional[t.Union[float, str]] = None, pretty: t.Optional[bool] = None, start: t.Optional[t.Union[str, t.Any]] = None, top_n: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get overall bucket results.

Retrieves overall bucket results that summarize the bucket results of multiple anomaly detection jobs.

The overall_score is calculated by combining the scores of all the buckets within the overall bucket span. First, the maximum anomaly_score per anomaly detection job in the overall bucket is calculated. Then the top_n of those scores are averaged to result in the overall_score. This means that you can fine-tune the overall_score so that it is more or less sensitive to the number of jobs that detect an anomaly at the same time. For example, if you set top_n to 1, the overall_score is the maximum bucket score in the overall bucket. Alternatively, if you set top_n to the number of jobs, the overall_score is high only when all jobs detect anomalies in that overall bucket. If you set the bucket_span parameter (to a value greater than its default), the overall_score is the maximum overall_score of the overall buckets that have a span equal to the jobs' largest bucket span.
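A minimal sketch of the top_n tuning described above (the job pattern is assumed)::

    # Score an overall bucket by the average of its two highest per-job scores.
    resp = client.ml.get_overall_buckets(job_id="job-*", top_n=2, overall_score=60.0)
    for bucket in resp["overall_buckets"]:
        print(bucket["timestamp"], bucket["overall_score"])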

``_ :param job_id: Identifier for the anomaly detection job. It can be a job identifier, a group name, a comma-separated list of jobs or groups, or a wildcard expression. You can summarize the bucket results for all anomaly detection jobs by using `_all` or by specifying `*` as the `<job_id>`. :param allow_no_match: Refer to the description for the `allow_no_match` query parameter. :param bucket_span: Refer to the description for the `bucket_span` query parameter. :param end: Refer to the description for the `end` query parameter. :param exclude_interim: Refer to the description for the `exclude_interim` query parameter. :param overall_score: Refer to the description for the `overall_score` query parameter. :param start: Refer to the description for the `start` query parameter. :param top_n: Refer to the description for the `top_n` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = ( f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/overall_buckets' ) __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_no_match is not None: __body["allow_no_match"] = allow_no_match if bucket_span is not None: __body["bucket_span"] = bucket_span if end is not None: __body["end"] = end if exclude_interim is not None: __body["exclude_interim"] = exclude_interim if overall_score is not None: __body["overall_score"] = overall_score if start is not None: __body["start"] = start if top_n is not None: __body["top_n"] = top_n if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_overall_buckets", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "desc", "end", "exclude_interim", "page", "record_score", "sort", "start", ), parameter_aliases={"from": "from_"}, ) def get_records( self, *, job_id: str, desc: t.Optional[bool] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, exclude_interim: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, page: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, record_score: t.Optional[float] = None, size: t.Optional[int] = None, sort: t.Optional[str] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get anomaly records for an anomaly detection job. Records contain the detailed analytical results. They describe the anomalous activity that has been identified in the input data based on the detector configuration. There can be many anomaly records depending on the characteristics and size of the input data. In practice, there are often too many to be able to manually process them. The machine learning features therefore perform a sophisticated aggregation of the anomaly records into buckets. The number of record results depends on the number of anomalies found in each bucket, which relates to the number of time series being modeled and the number of detectors.
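A hedged sketch, assuming ``client`` and an existing job ``"my-job"``::

    resp = client.ml.get_records(
        job_id="my-job", record_score=80.0, sort="record_score", desc=True
    )
    for record in resp["records"]:
        print(record["timestamp"], record["record_score"])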

``_ :param job_id: Identifier for the anomaly detection job. :param desc: Refer to the description for the `desc` query parameter. :param end: Refer to the description for the `end` query parameter. :param exclude_interim: Refer to the description for the `exclude_interim` query parameter. :param from_: Skips the specified number of records. :param page: :param record_score: Refer to the description for the `record_score` query parameter. :param size: Specifies the maximum number of records to obtain. :param sort: Refer to the description for the `sort` query parameter. :param start: Refer to the description for the `start` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/results/records' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if not __body: if desc is not None: __body["desc"] = desc if end is not None: __body["end"] = end if exclude_interim is not None: __body["exclude_interim"] = exclude_interim if page is not None: __body["page"] = page if record_score is not None: __body["record_score"] = record_score if sort is not None: __body["sort"] = sort if start is not None: __body["start"] = start if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.get_records", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_trained_models( self, *, model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, decompress_definition: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, include: t.Optional[ t.Union[ str, t.Literal[ "definition", "definition_status", "feature_importance_baseline", "hyperparameters", "total_feature_importance", ], ] ] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, tags: t.Optional[t.Union[str, t.Sequence[str]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get trained model configuration info.
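For illustration (the model id pattern is an assumed example)::

    resp = client.ml.get_trained_models(model_id="my-model*", exclude_generated=True)
    for model in resp["trained_model_configs"]:
        print(model["model_id"])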

``_ :param model_id: The unique identifier of the trained model or a model alias. You can get information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. :param decompress_definition: Specifies whether the included model definition should be returned as a JSON map (true) or in a custom compressed format (false). :param exclude_generated: Indicates if certain fields should be removed from the configuration on retrieval. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. :param from_: Skips the specified number of models. :param include: A comma delimited string of optional fields to include in the response body. :param size: Specifies the maximum number of models to obtain. :param tags: A comma delimited string of tags. A trained model can have many tags, or none. When supplied, only trained models that contain all the supplied tags are returned. """ __path_parts: t.Dict[str, str] if model_id not in SKIP_IN_PATH: __path_parts = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}' else: __path_parts = {} __path = "/_ml/trained_models" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if decompress_definition is not None: __query["decompress_definition"] = decompress_definition if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if include is not None: __query["include"] = include if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if tags is not None: __query["tags"] = tags __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_trained_models", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_trained_models_stats( self, *, model_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get trained models usage info. You can get usage information for multiple trained models in a single API request by using a comma-separated list of model IDs or a wildcard expression.
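A minimal sketch using the ``_all`` form::

    resp = client.ml.get_trained_models_stats(model_id="_all", size=100)
    print(resp["count"])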

``_ :param model_id: The unique identifier of the trained model or a model alias. It can be a comma-separated list or a wildcard expression. :param allow_no_match: Specifies what to do when the request: - Contains wildcard expressions and there are no models that match. - Contains the _all string or no identifiers and there are no matches. - Contains wildcard expressions and there are only partial matches. If true, it returns an empty array when there are no matches and the subset of results when there are partial matches. :param from_: Skips the specified number of models. :param size: Specifies the maximum number of models to obtain. """ __path_parts: t.Dict[str, str] if model_id not in SKIP_IN_PATH: __path_parts = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/_stats' else: __path_parts = {} __path = "/_ml/trained_models/_stats" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.get_trained_models_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("docs", "inference_config"), ) def infer_trained_model( self, *, model_id: str, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Evaluate a trained model.
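A hedged sketch; the model id and input text are assumed examples, and ``text_field`` follows the typical NLP input field name noted below::

    resp = client.ml.infer_trained_model(
        model_id="my-nlp-model",
        docs=[{"text_field": "Elasticsearch is a search engine"}],
        timeout="30s",
    )
    print(resp["inference_results"])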

``_ :param model_id: The unique identifier of the trained model. :param docs: An array of objects to pass to the model for inference. The objects should contain fields matching your configured trained model input. Typically, for NLP models, the field name is `text_field`. Currently, for NLP models, only a single value is allowed. :param inference_config: The inference configuration updates to apply on the API call. :param timeout: Controls the amount of time to wait for inference results. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if docs is None and body is None: raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/_infer' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if docs is not None: __body["docs"] = docs if inference_config is not None: __body["inference_config"] = inference_config __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.infer_trained_model", path_parts=__path_parts, ) @_rewrite_parameters() def info( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get machine learning information. Get defaults and limits used by machine learning. This endpoint is designed to be used by a user interface that needs to fully understand machine learning configurations where some options are not specified, meaning that the defaults should be used. This endpoint may be used to find out what those defaults are. It also provides information about the maximum size of machine learning jobs that could run in the current cluster configuration.
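
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the response keys shown follow the documented structure and are an assumption here)::

            resp = client.ml.info()
            print(resp["limits"])        # cluster-wide ML limits
            print(resp["upgrade_mode"])  # whether upgrade mode is enabled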

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/info" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ml.info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("timeout",), ) def open_job( self, *, job_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Open anomaly detection jobs.

An anomaly detection job must be opened to be ready to receive and analyze data. It can be opened and closed multiple times throughout its lifecycle. When you open a new job, it starts with an empty model. When you open an existing job, the most recent model state is automatically loaded. The job is ready to resume its analysis from where it left off, once new data is received.
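
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the job ID is a placeholder)::

            resp = client.ml.open_job(job_id="my-anomaly-job", timeout="30m")
            print(resp["opened"])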

``_ :param job_id: Identifier for the anomaly detection job. :param timeout: Refer to the description for the `timeout` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_open' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.open_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("events",), ) def post_calendar_events( self, *, calendar_id: str, events: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Add scheduled events to the calendar.
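
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the calendar ID and event values are placeholders)::

            client.ml.post_calendar_events(
                calendar_id="planned-outages",
                events=[
                    {
                        "description": "quarterly maintenance window",
                        "start_time": "2024-01-06T00:00:00Z",
                        "end_time": "2024-01-06T04:00:00Z",
                    }
                ],
            )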

``_ :param calendar_id: A string that uniquely identifies a calendar. :param events: A list of one or more scheduled events. The event’s start and end times can be specified as integer milliseconds since the epoch or as a string in ISO 8601 format. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if events is None and body is None: raise ValueError("Empty value passed for parameter 'events'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/events' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if events is not None: __body["events"] = events __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.post_calendar_events", path_parts=__path_parts, ) @_rewrite_parameters( body_name="data", ) def post_data( self, *, job_id: str, data: t.Optional[t.Sequence[t.Any]] = None, body: t.Optional[t.Sequence[t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, reset_end: t.Optional[t.Union[str, t.Any]] = None, reset_start: t.Optional[t.Union[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Send data to an anomaly detection job for analysis.

IMPORTANT: For each job, data can be accepted from only a single connection at a time. It is not currently possible to post data to multiple jobs using wildcards or a comma-separated list.
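
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the job ID and document fields are placeholders). The `data` sequence is serialized as newline-delimited JSON::

            docs = [
                {"timestamp": 1700000000000, "bytes": 512},
                {"timestamp": 1700000060000, "bytes": 1024},
            ]
            client.ml.post_data(job_id="my-anomaly-job", data=docs)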

``_ :param job_id: Identifier for the anomaly detection job. The job must have a state of open to receive and process the data. :param data: :param reset_end: Specifies the end of the bucket resetting range. :param reset_start: Specifies the start of the bucket resetting range. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if data is None and body is None: raise ValueError( "Empty value passed for parameters 'data' and 'body', one of them should be set." ) elif data is not None and body is not None: raise ValueError("Cannot set both 'data' and 'body'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_data' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if reset_end is not None: __query["reset_end"] = reset_end if reset_start is not None: __query["reset_start"] = reset_start __body = data if data is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.post_data", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("config",), ) def preview_data_frame_analytics( self, *, id: t.Optional[str] = None, config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Preview features used by data frame analytics. Preview the extracted features used by a data frame analytics config.
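
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the index name is a placeholder and the `feature_values` response key is an assumption based on the documented response)::

            resp = client.ml.preview_data_frame_analytics(
                config={
                    "source": {"index": "my-source-index"},
                    "analysis": {"outlier_detection": {}},
                }
            )
            print(resp["feature_values"])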

``_ :param id: Identifier for the data frame analytics job. :param config: A data frame analytics config as described in create data frame analytics jobs. Note that `id` and `dest` don’t need to be provided in the context of this API. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_preview' else: __path_parts = {} __path = "/_ml/data_frame/analytics/_preview" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if config is not None: __body["config"] = config if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.preview_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("datafeed_config", "job_config"), ) def preview_datafeed( self, *, datafeed_id: t.Optional[str] = None, datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_config: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, start: t.Optional[t.Union[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Preview a datafeed. This API returns the first "page" of search results from a datafeed. You can preview an existing datafeed or provide configuration details for a datafeed and anomaly detection job in the API. The preview shows the structure of the data that will be passed to the anomaly detection engine. IMPORTANT: When Elasticsearch security features are enabled, the preview uses the credentials of the user that called the API. However, when the datafeed starts it uses the roles of the last user that created or updated the datafeed. To get a preview that accurately reflects the behavior of the datafeed, use the appropriate credentials. You can also use secondary authorization headers to supply the credentials.
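
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the datafeed ID and time range are placeholders)::

            resp = client.ml.preview_datafeed(
                datafeed_id="datafeed-my-anomaly-job",
                start="2024-01-01T00:00:00Z",
                end="2024-01-02T00:00:00Z",
            )
            print(resp.body)  # the first "page" of documents the datafeed would fetch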

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. NOTE: If you use this path parameter, you cannot provide datafeed or anomaly detection job configuration details in the request body. :param datafeed_config: The datafeed definition to preview. :param end: The end time when the datafeed preview should stop :param job_config: The configuration details for the anomaly detection job that is associated with the datafeed. If the `datafeed_config` object does not include a `job_id` that references an existing anomaly detection job, you must supply this `job_config` object. If you include both a `job_id` and a `job_config`, the latter information is used. You cannot specify a `job_config` object unless you also supply a `datafeed_config` object. :param start: The start time from where the datafeed preview should begin """ __path_parts: t.Dict[str, str] if datafeed_id not in SKIP_IN_PATH: __path_parts = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_preview' else: __path_parts = {} __path = "/_ml/datafeeds/_preview" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if end is not None: __query["end"] = end if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if start is not None: __query["start"] = start if not __body: if datafeed_config is not None: __body["datafeed_config"] = datafeed_config if job_config is not None: __body["job_config"] = job_config if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.preview_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "job_ids"), ) def put_calendar( self, *, calendar_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_ids: t.Optional[t.Sequence[str]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a calendar.
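
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; IDs are placeholders)::

            client.ml.put_calendar(
                calendar_id="planned-outages",
                description="Known maintenance windows",
                job_ids=["my-anomaly-job"],
            )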

``_ :param calendar_id: A string that uniquely identifies a calendar. :param description: A description of the calendar. :param job_ids: An array of anomaly detection job identifiers. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") __path_parts: t.Dict[str, str] = {"calendar_id": _quote(calendar_id)} __path = f'/_ml/calendars/{__path_parts["calendar_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if job_ids is not None: __body["job_ids"] = job_ids if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_calendar", path_parts=__path_parts, ) @_rewrite_parameters() def put_calendar_job( self, *, calendar_id: str, job_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Add anomaly detection job to calendar.
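
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; IDs are placeholders)::

            client.ml.put_calendar_job(
                calendar_id="planned-outages",
                job_id="my-anomaly-job",
            )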

``_ :param calendar_id: A string that uniquely identifies a calendar. :param job_id: An identifier for the anomaly detection jobs. It can be a job identifier, a group name, or a comma-separated list of jobs or groups. """ if calendar_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'calendar_id'") if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = { "calendar_id": _quote(calendar_id), "job_id": _quote(job_id), } __path = f'/_ml/calendars/{__path_parts["calendar_id"]}/jobs/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="ml.put_calendar_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis", "dest", "source", "allow_lazy_start", "analyzed_fields", "description", "headers", "max_num_threads", "meta", "model_memory_limit", "version", ), parameter_aliases={"_meta": "meta"}, ignore_deprecated_options={"headers"}, ) def put_data_frame_analytics( self, *, id: str, analysis: t.Optional[t.Mapping[str, t.Any]] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_start: t.Optional[bool] = None, analyzed_fields: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, version: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a data frame analytics job. This API creates a data frame analytics job that performs an analysis on the source indices and stores the outcome in a destination index. By default, the query used in the source configuration is {"match_all": {}}.

If the destination index does not exist, it is created automatically when you start the job.

If you supply only a subset of the regression or classification parameters, hyperparameter optimization occurs. It determines a value for each of the undefined parameters.
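
        A minimal sketch of an outlier detection job (assumes `client` is a configured `Elasticsearch` instance; index and job names are placeholders)::

            client.ml.put_data_frame_analytics(
                id="my-outlier-job",
                source={"index": "my-source-index"},
                dest={"index": "my-dest-index"},
                analysis={"outlier_detection": {}},
            )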

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param analysis: The analysis configuration, which contains the information necessary to perform one of the following types of analysis: classification, outlier detection, or regression. :param dest: The destination configuration. :param source: The configuration of how to source the analysis data. :param allow_lazy_start: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If set to `false` and a machine learning node with capacity to run the job cannot be immediately found, the API returns an error. If set to `true`, the API does not return an error; the job waits in the `starting` state until sufficient machine learning node capacity is available. This behavior is also affected by the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. :param analyzed_fields: Specifies `includes` and/or `excludes` patterns to select which fields will be included in the analysis. The patterns specified in `excludes` are applied last, therefore `excludes` takes precedence. In other words, if the same field is specified in both `includes` and `excludes`, then the field will not be included in the analysis. If `analyzed_fields` is not set, only the relevant fields will be included. For example, all the numeric fields for outlier detection. The supported fields vary for each type of analysis. Outlier detection requires numeric or `boolean` data to analyze. The algorithms don’t support missing values therefore fields that have data types other than numeric or boolean are ignored. Documents where included fields contain missing values, null values, or an array are also ignored. Therefore the `dest` index may contain documents that don’t have an outlier score. Regression supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the regression analysis. Classification supports fields that are numeric, `boolean`, `text`, `keyword`, and `ip` data types. It is also tolerant of missing values. Fields that are supported are included in the analysis, other fields are ignored. Documents where included fields contain an array with two or more values are also ignored. Documents in the `dest` index that don’t contain a results field are not included in the classification analysis. Classification analysis can be improved by mapping ordinal variable values to a single number. For example, in case of age ranges, you can model the values as `0-14 = 0`, `15-24 = 1`, `25-34 = 2`, and so on. :param description: A description of the job. :param headers: :param max_num_threads: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. :param meta: :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. 
If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. :param version: """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if analysis is None and body is None: raise ValueError("Empty value passed for parameter 'analysis'") if dest is None and body is None: raise ValueError("Empty value passed for parameter 'dest'") if source is None and body is None: raise ValueError("Empty value passed for parameter 'source'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analysis is not None: __body["analysis"] = analysis if dest is not None: __body["dest"] = dest if source is not None: __body["source"] = source if allow_lazy_start is not None: __body["allow_lazy_start"] = allow_lazy_start if analyzed_fields is not None: __body["analyzed_fields"] = analyzed_fields if description is not None: __body["description"] = description if headers is not None: __body["headers"] = headers if max_num_threads is not None: __body["max_num_threads"] = max_num_threads if meta is not None: __body["_meta"] = meta if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit if version is not None: __body["version"] = version __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "chunking_config", "delayed_data_check_config", "frequency", "headers", "indexes", "indices", "indices_options", "job_id", "max_empty_searches", "query", "query_delay", "runtime_mappings", "script_fields", "scroll_size", ), ignore_deprecated_options={"headers"}, ) def put_datafeed( self, *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indexes: t.Optional[t.Union[str, t.Sequence[str]]] = None, indices: t.Optional[t.Union[str, t.Sequence[str]]] = None, indices_options: t.Optional[t.Mapping[str, t.Any]] = None, job_id: t.Optional[str] = None, max_empty_searches: t.Optional[int] = None, pretty: t.Optional[bool] = None, query: 
t.Optional[t.Mapping[str, t.Any]] = None, query_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll_size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a datafeed. Datafeeds retrieve data from Elasticsearch for analysis by an anomaly detection job. You can associate only one datafeed with each anomaly detection job. The datafeed contains a query that runs at a defined interval (frequency). If you are concerned about delayed data, you can add a delay (query_delay) at each interval. By default, the datafeed uses the following query: {"match_all": {"boost": 1}}.

When Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. You must use Kibana, this API, or the create anomaly detection jobs API to create a datafeed. Do not add a datafeed directly to the .ml-config index. Do not give users write privileges on the .ml-config index.
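
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the datafeed ID, job ID, and index pattern are placeholders)::

            client.ml.put_datafeed(
                datafeed_id="datafeed-my-anomaly-job",
                job_id="my-anomaly-job",
                indices=["my-metrics-*"],
                query={"match_all": {}},
                scroll_size=1000,
            )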

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. :param aggs: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If true, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. :param chunking_config: Datafeeds might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. :param delayed_data_check_config: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. :param headers: :param ignore_throttled: If true, concrete, expanded, or aliased indices are ignored when frozen. :param ignore_unavailable: If true, unavailable indices (missing or closed) are ignored. :param indexes: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. :param indices: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the master nodes and the machine learning nodes must have the `remote_cluster_client` role. :param indices_options: Specifies index expansion options that are used during search :param job_id: Identifier for the anomaly detection job. :param max_empty_searches: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. 
:param query: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. :param query_delay: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. :param runtime_mappings: Specifies runtime fields for the datafeed search. :param script_fields: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. :param scroll_size: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`, which is 10,000 by default. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: __body["delayed_data_check_config"] = delayed_data_check_config if frequency is not None: __body["frequency"] = frequency if headers is not None: __body["headers"] = headers if indexes is not None: __body["indexes"] = indexes if indices is not None: __body["indices"] = indices if indices_options is not None: __body["indices_options"] = indices_options if job_id is not None: __body["job_id"] = job_id if max_empty_searches is not None: __body["max_empty_searches"] = max_empty_searches if query is not None: __body["query"] = query if query_delay is not None: __body["query_delay"] = query_delay if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if scroll_size is not None: __body["scroll_size"] = scroll_size __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "items"), ) def put_filter( self, *, filter_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, 
t.Sequence[str]]] = None, human: t.Optional[bool] = None, items: t.Optional[t.Sequence[str]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a filter. A filter contains a list of strings. It can be used by one or more anomaly detection jobs. Specifically, filters are referenced in the custom_rules property of detector configuration objects.
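
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the filter ID and items are placeholders)::

            client.ml.put_filter(
                filter_id="safe-domains",
                description="Domains to exclude from anomaly rules",
                items=["*.example.com", "internal.example.org"],
            )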

``_ :param filter_id: A string that uniquely identifies a filter. :param description: A description of the filter. :param items: The items of the filter. A wildcard `*` can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. """ if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'filter_id'") __path_parts: t.Dict[str, str] = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if items is not None: __body["items"] = items __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_filter", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis_config", "data_description", "allow_lazy_open", "analysis_limits", "background_persist_interval", "custom_settings", "daily_model_snapshot_retention_after_days", "datafeed_config", "description", "groups", "model_plot_config", "model_snapshot_retention_days", "renormalization_window_days", "results_index_name", "results_retention_days", ), ) def put_job( self, *, job_id: str, analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, allow_lazy_open: t.Optional[bool] = None, allow_no_indices: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, custom_settings: t.Optional[t.Any] = None, daily_model_snapshot_retention_after_days: t.Optional[int] = None, datafeed_config: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, renormalization_window_days: t.Optional[int] = None, results_index_name: t.Optional[str] = None, results_retention_days: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an anomaly detection job.

If you include a datafeed_config, you must have read index privileges on the source index. If you include a datafeed_config but do not provide a query, the datafeed uses {"match_all": {"boost": 1}}.
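
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the job ID, bucket span, and field names are placeholders)::

            client.ml.put_job(
                job_id="my-anomaly-job",
                analysis_config={
                    "bucket_span": "15m",
                    "detectors": [{"function": "mean", "field_name": "bytes"}],
                },
                data_description={"time_field": "timestamp"},
            )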

``_ :param job_id: The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param analysis_config: Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. :param data_description: Defines the format of the input data when you send data to the job by using the post data API. Note that when you configure a datafeed, these properties are automatically set. When data is received via the post data API, it is not stored in Elasticsearch. Only the results for anomaly detection are retained. :param allow_lazy_open: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. By default, if a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to true, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. :param allow_no_indices: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. :param analysis_limits: Limits can be applied for the resources required to hold the mathematical models in memory. These limits are approximate and can be set per job. They do not control the memory used by other processes, for example the Elasticsearch Java processes. :param background_persist_interval: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 and 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the `background_persist_interval` value too low. :param custom_settings: Advanced configuration option. Contains custom metadata about the job. :param daily_model_snapshot_retention_after_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. :param datafeed_config: Defines a datafeed for the anomaly detection job. If Elasticsearch security features are enabled, your datafeed remembers which roles the user who created it had at the time of creation and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead. :param description: A description of the job. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. :param groups: A list of job groups. A job can belong to no groups or many. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen.
:param ignore_unavailable: If `true`, unavailable indices (missing or closed) are ignored. :param model_plot_config: This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. If you enable model plot it can add considerable overhead to the performance of the system; it is not feasible for jobs with many entities. Model plot provides a simplified and indicative view of the model and its bounds. It does not display complex features such as multivariate correlations or multimodal data. As such, anomalies may occasionally be reported which cannot be seen in the model plot. Model plot config can be configured when the job is created or updated later. It must be disabled if performance issues are experienced. :param model_snapshot_retention_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. By default, snapshots ten days older than the newest snapshot are deleted. :param renormalization_window_days: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. The default value is the longer of 30 days or 100 bucket spans. :param results_index_name: A text string that affects the name of the machine learning results index. By default, the job generates an index named `.ml-anomalies-shared`. :param results_retention_days: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. Annotations generated by the system also count as results for retention purposes; they are deleted after the same number of days as results. Annotations added by users are retained forever. 
""" if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if analysis_config is None and body is None: raise ValueError("Empty value passed for parameter 'analysis_config'") if data_description is None and body is None: raise ValueError("Empty value passed for parameter 'data_description'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if analysis_config is not None: __body["analysis_config"] = analysis_config if data_description is not None: __body["data_description"] = data_description if allow_lazy_open is not None: __body["allow_lazy_open"] = allow_lazy_open if analysis_limits is not None: __body["analysis_limits"] = analysis_limits if background_persist_interval is not None: __body["background_persist_interval"] = background_persist_interval if custom_settings is not None: __body["custom_settings"] = custom_settings if daily_model_snapshot_retention_after_days is not None: __body["daily_model_snapshot_retention_after_days"] = ( daily_model_snapshot_retention_after_days ) if datafeed_config is not None: __body["datafeed_config"] = datafeed_config if description is not None: __body["description"] = description if groups is not None: __body["groups"] = groups if model_plot_config is not None: __body["model_plot_config"] = model_plot_config if model_snapshot_retention_days is not None: __body["model_snapshot_retention_days"] = model_snapshot_retention_days if renormalization_window_days is not None: __body["renormalization_window_days"] = renormalization_window_days if results_index_name is not None: __body["results_index_name"] = results_index_name if results_retention_days is not None: __body["results_retention_days"] = results_retention_days __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "compressed_definition", "definition", "description", "inference_config", "input", "metadata", "model_size_bytes", "model_type", "platform_architecture", "prefix_strings", "tags", ), ) def put_trained_model( self, *, model_id: str, compressed_definition: t.Optional[str] = None, defer_definition_decompression: t.Optional[bool] = None, definition: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, inference_config: t.Optional[t.Mapping[str, t.Any]] = None, input: t.Optional[t.Mapping[str, t.Any]] = None, metadata: t.Optional[t.Any] = None, model_size_bytes: t.Optional[int] = None, model_type: t.Optional[ t.Union[str, t.Literal["lang_ident", "pytorch", 
"tree_ensemble"]] ] = None, platform_architecture: t.Optional[str] = None, prefix_strings: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, tags: t.Optional[t.Sequence[str]] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a trained model. Enables you to supply a trained model that is not created by data frame analytics.

``_ :param model_id: The unique identifier of the trained model. :param compressed_definition: The compressed (GZipped and Base64 encoded) inference definition of the model. If compressed_definition is specified, then definition cannot be specified. :param defer_definition_decompression: If set to `true` and a `compressed_definition` is provided, the request defers definition decompression and skips relevant validations. :param definition: The inference definition for the model. If definition is specified, then compressed_definition cannot be specified. :param description: A human-readable description of the inference trained model. :param inference_config: The default configuration for inference. This can be either a regression or classification configuration. It must match the underlying definition.trained_model's target_type. For pre-packaged models such as ELSER the config is not required. :param input: The input field names for the model definition. :param metadata: An object map that contains metadata about the model. :param model_size_bytes: The estimated memory usage in bytes to keep the trained model in memory. This property is supported only if defer_definition_decompression is true or the model definition is not supplied. :param model_type: The model type. :param platform_architecture: The platform architecture (if applicable) of the trained mode. If the model only works on one platform, because it is heavily optimized for a particular processor architecture and OS combination, then this field specifies which. The format of the string must match the platform identifiers used by Elasticsearch, so one of, `linux-x86_64`, `linux-aarch64`, `darwin-x86_64`, `darwin-aarch64`, or `windows-x86_64`. For portable models (those that work independent of processor architecture or OS features), leave this field unset. :param prefix_strings: Optional prefix strings applied at inference :param tags: An array of tags to organize the model. :param wait_for_completion: Whether to wait for all child operations (e.g. model download) to complete. 
""" if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if defer_definition_decompression is not None: __query["defer_definition_decompression"] = defer_definition_decompression if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if compressed_definition is not None: __body["compressed_definition"] = compressed_definition if definition is not None: __body["definition"] = definition if description is not None: __body["description"] = description if inference_config is not None: __body["inference_config"] = inference_config if input is not None: __body["input"] = input if metadata is not None: __body["metadata"] = metadata if model_size_bytes is not None: __body["model_size_bytes"] = model_size_bytes if model_type is not None: __body["model_type"] = model_type if platform_architecture is not None: __body["platform_architecture"] = platform_architecture if prefix_strings is not None: __body["prefix_strings"] = prefix_strings if tags is not None: __body["tags"] = tags __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_trained_model", path_parts=__path_parts, ) @_rewrite_parameters() def put_trained_model_alias( self, *, model_id: str, model_alias: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, reassign: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a trained model alias. A trained model alias is a logical name used to reference a single trained model. You can use aliases instead of trained model identifiers to make it easier to reference your models. For example, you can use aliases in inference aggregations and processors. An alias must be unique and refer to only a single trained model. However, you can have multiple aliases for each trained model. If you use this API to update an alias such that it references a different trained model ID and the model uses a different type of data frame analytics, an error occurs. For example, this situation occurs if you have a trained model for regression analysis and a trained model for classification analysis; you cannot reassign an alias from one type of trained model to another. If you use this API to update an alias and there are very few input fields in common between the old and new trained models for the model alias, the API returns a warning.
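
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the model IDs are placeholders)::

            client.ml.put_trained_model_alias(
                model_id="my-nlp-model-v2",
                model_alias="my-nlp-model",
                reassign=True,  # move the alias from the previously assigned model
            )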

``_ :param model_id: The identifier for the trained model that the alias refers to. :param model_alias: The alias to create or update. This value cannot end in numbers. :param reassign: Specifies whether the alias gets reassigned to the specified trained model if it is already assigned to a different model. If the alias is already assigned and this parameter is false, the API returns an error. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if model_alias in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_alias'") __path_parts: t.Dict[str, str] = { "model_id": _quote(model_id), "model_alias": _quote(model_alias), } __path = f'/_ml/trained_models/{__path_parts["model_id"]}/model_aliases/{__path_parts["model_alias"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if reassign is not None: __query["reassign"] = reassign __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="ml.put_trained_model_alias", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("definition", "total_definition_length", "total_parts"), ) def put_trained_model_definition_part( self, *, model_id: str, part: int, definition: t.Optional[str] = None, total_definition_length: t.Optional[int] = None, total_parts: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create part of a trained model definition.

``_ :param model_id: The unique identifier of the trained model. :param part: The definition part number. When the definition is loaded for inference the definition parts are streamed in the order of their part number. The first part must be `0` and the final part must be `total_parts - 1`. :param definition: The definition part for the model. Must be a base64 encoded string. :param total_definition_length: The total uncompressed definition length in bytes. Not base64 encoded. :param total_parts: The total number of parts that will be uploaded. Must be greater than 0. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if part in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'part'") if definition is None and body is None: raise ValueError("Empty value passed for parameter 'definition'") if total_definition_length is None and body is None: raise ValueError( "Empty value passed for parameter 'total_definition_length'" ) if total_parts is None and body is None: raise ValueError("Empty value passed for parameter 'total_parts'") __path_parts: t.Dict[str, str] = { "model_id": _quote(model_id), "part": _quote(part), } __path = f'/_ml/trained_models/{__path_parts["model_id"]}/definition/{__path_parts["part"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if definition is not None: __body["definition"] = definition if total_definition_length is not None: __body["total_definition_length"] = total_definition_length if total_parts is not None: __body["total_parts"] = total_parts __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_trained_model_definition_part", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("vocabulary", "merges", "scores"), ) def put_trained_model_vocabulary( self, *, model_id: str, vocabulary: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, merges: t.Optional[t.Sequence[str]] = None, pretty: t.Optional[bool] = None, scores: t.Optional[t.Sequence[float]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a trained model vocabulary. This API is supported only for natural language processing (NLP) models. The vocabulary is stored in the index as described in inference_config.*.vocabulary of the trained model definition.
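
        A minimal sketch (assumes `client` is a configured `Elasticsearch` instance; the model ID and vocabulary entries are placeholders)::

            client.ml.put_trained_model_vocabulary(
                model_id="my-nlp-model",
                vocabulary=["[PAD]", "[UNK]", "the", "quick", "brown", "fox"],
            )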

``_ :param model_id: The unique identifier of the trained model. :param vocabulary: The model vocabulary, which must not be empty. :param merges: The optional model merges if required by the tokenizer. :param scores: The optional vocabulary value scores if required by the tokenizer. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") if vocabulary is None and body is None: raise ValueError("Empty value passed for parameter 'vocabulary'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/vocabulary' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if vocabulary is not None: __body["vocabulary"] = vocabulary if merges is not None: __body["merges"] = merges if scores is not None: __body["scores"] = scores __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.put_trained_model_vocabulary", path_parts=__path_parts, ) @_rewrite_parameters() def reset_job( self, *, job_id: str, delete_user_annotations: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reset an anomaly detection job. All model state and results are deleted. The job is ready to start over as if it had just been created. It is not currently possible to reset multiple jobs using wildcards or a comma-separated list.
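
Example (sketch; ``client`` and the job ID are assumptions)::

    client.ml.reset_job(job_id="my-job", wait_for_completion=True)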

``_ :param job_id: The ID of the job to reset. :param delete_user_annotations: Specifies whether annotations that have been added by the user should be deleted along with any auto-generated annotations when the job is reset. :param wait_for_completion: Should this request wait until the operation has completed before returning. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_reset' __query: t.Dict[str, t.Any] = {} if delete_user_annotations is not None: __query["delete_user_annotations"] = delete_user_annotations if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.reset_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("delete_intervening_results",), ) def revert_model_snapshot( self, *, job_id: str, snapshot_id: str, delete_intervening_results: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Revert to a snapshot. The machine learning features react quickly to anomalous input, learning new behaviors in data. Highly anomalous input increases the variance in the models whilst the system learns whether this is a new step-change in behavior or a one-off event. If this anomalous input is known to be a one-off, it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure.
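
Example (sketch; ``client`` and the job ID are hypothetical, while ``"empty"`` is the special snapshot ID described below)::

    client.ml.revert_model_snapshot(
        job_id="my-job",
        snapshot_id="empty",
        delete_intervening_results=True,
    )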

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: You can specify `empty` as the `snapshot_id`. Reverting to the empty snapshot means the anomaly detection job starts learning a new model from scratch when it is started. :param delete_intervening_results: Refer to the description for the `delete_intervening_results` query parameter. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_revert' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if delete_intervening_results is not None: __body["delete_intervening_results"] = delete_intervening_results if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.revert_model_snapshot", path_parts=__path_parts, ) @_rewrite_parameters() def set_upgrade_mode( self, *, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set upgrade_mode for ML indices. Sets a cluster-wide upgrade_mode setting that prepares machine learning indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your machine learning indices. In those circumstances, there must be no machine learning jobs running. You can close the machine learning jobs, do the upgrade, then open all the jobs again. Alternatively, you can use this API to temporarily halt tasks associated with the jobs and datafeeds and prevent new jobs from opening. You can also use this API during upgrades that do not require you to reindex your machine learning indices, though stopping jobs is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get machine learning info API.
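
Example (sketch; ``client`` is an assumed ``Elasticsearch`` instance)::

    client.ml.set_upgrade_mode(enabled=True, timeout="10m")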

``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all job and datafeed tasks and prohibits new job and datafeed tasks from starting. :param timeout: The time to wait for the request to be completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/set_upgrade_mode" __query: t.Dict[str, t.Any] = {} if enabled is not None: __query["enabled"] = enabled if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.set_upgrade_mode", path_parts=__path_parts, ) @_rewrite_parameters() def start_data_frame_analytics( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a data frame analytics job. A data frame analytics job can be started and stopped multiple times throughout its lifecycle. If the destination index does not exist, it is created automatically the first time you start the data frame analytics job. The index.number_of_shards and index.number_of_replicas settings for the destination index are copied from the source index. If there are multiple source indices, the destination index copies the highest setting values. The mappings for the destination index are also copied from the source indices. If there are any mapping conflicts, the job fails to start. If the destination index exists, it is used as is. You can therefore set up the destination index in advance with custom settings and mappings.
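
Example (sketch; ``client`` and the job identifier are assumptions)::

    client.ml.start_data_frame_analytics(id="my-analytics-job", timeout="1m")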

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param timeout: Controls the amount of time to wait until the data frame analytics job starts. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_start' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.start_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("end", "start", "timeout"), ) def start_datafeed( self, *, datafeed_id: str, end: t.Optional[t.Union[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, start: t.Optional[t.Union[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start datafeeds.

A datafeed must be started in order to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.

Before you can start a datafeed, the anomaly detection job must be open. Otherwise, an error occurs.

If you restart a stopped datafeed, it continues processing input data from the next millisecond after it was stopped. If new data was indexed for that exact millisecond between stopping and starting, it will be ignored.

When Elasticsearch security features are enabled, your datafeed remembers which roles the last user to create or update it had at the time of creation or update and runs the query using those same roles. If you provided secondary authorization headers when you created or updated the datafeed, those credentials are used instead.
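
Example (sketch; ``client`` and the datafeed ID are hypothetical)::

    client.ml.start_datafeed(
        datafeed_id="datafeed-my-job",
        start="2024-01-01T00:00:00Z",
    )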

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param end: Refer to the description for the `end` query parameter. :param start: Refer to the description for the `start` query parameter. :param timeout: Refer to the description for the `timeout` query parameter. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_start' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if end is not None: __body["end"] = end if start is not None: __body["start"] = start if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.start_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("adaptive_allocations",), ) def start_trained_model_deployment( self, *, model_id: str, adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, cache_size: t.Optional[t.Union[int, str]] = None, deployment_id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, number_of_allocations: t.Optional[int] = None, pretty: t.Optional[bool] = None, priority: t.Optional[t.Union[str, t.Literal["low", "normal"]]] = None, queue_capacity: t.Optional[int] = None, threads_per_allocation: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for: t.Optional[ t.Union[str, t.Literal["fully_allocated", "started", "starting"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a trained model deployment. It allocates the model to every machine learning node.
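
Example (sketch; ``client`` and the model ID are assumptions)::

    client.ml.start_trained_model_deployment(
        model_id="my-nlp-model",
        wait_for="started",
    )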

``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. :param adaptive_allocations: Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. :param cache_size: The inference cache size (in memory outside the JVM heap) per node for the model. The default value is the same size as the `model_size_bytes`. To disable the cache, `0b` can be provided. :param deployment_id: A unique identifier for the deployment of the model. :param number_of_allocations: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. :param priority: The deployment priority. :param queue_capacity: Specifies the number of inference requests that are allowed in the queue. After the number of requests exceeds this value, new requests are rejected with a 429 error. :param threads_per_allocation: Sets the number of threads used by each model allocation during inference. This generally increases the inference speed. The inference process is a compute-bound process; any number greater than the number of available hardware threads on the machine does not increase the inference speed. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. :param timeout: Specifies the amount of time to wait for the model to deploy. :param wait_for: Specifies the allocation status to wait for before returning. 
""" if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_start' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if cache_size is not None: __query["cache_size"] = cache_size if deployment_id is not None: __query["deployment_id"] = deployment_id if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if number_of_allocations is not None: __query["number_of_allocations"] = number_of_allocations if pretty is not None: __query["pretty"] = pretty if priority is not None: __query["priority"] = priority if queue_capacity is not None: __query["queue_capacity"] = queue_capacity if threads_per_allocation is not None: __query["threads_per_allocation"] = threads_per_allocation if timeout is not None: __query["timeout"] = timeout if wait_for is not None: __query["wait_for"] = wait_for if not __body: if adaptive_allocations is not None: __body["adaptive_allocations"] = adaptive_allocations if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.start_trained_model_deployment", path_parts=__path_parts, ) @_rewrite_parameters() def stop_data_frame_analytics( self, *, id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop data frame analytics jobs. A data frame analytics job can be started and stopped multiple times throughout its lifecycle.
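
Example (sketch; ``client`` and the job identifier are assumptions)::

    client.ml.stop_data_frame_analytics(id="my-analytics-job", timeout="20s")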

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no data frame analytics jobs that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. The default value is true, which returns an empty data_frame_analytics array when there are no matches and the subset of results when there are partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. :param force: If true, the data frame analytics job is stopped forcefully. :param timeout: Controls the amount of time to wait until the data frame analytics job stops. Defaults to 20 seconds. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_stop' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.stop_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("allow_no_match", "force", "timeout"), ) def stop_datafeed( self, *, datafeed_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop datafeeds. A datafeed that is stopped ceases to retrieve data from Elasticsearch. A datafeed can be started and stopped multiple times throughout its lifecycle.
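
Example (sketch; ``client`` and the datafeed ID are hypothetical)::

    client.ml.stop_datafeed(datafeed_id="datafeed-my-job", timeout="30s")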

``_ :param datafeed_id: Identifier for the datafeed. You can stop multiple datafeeds in a single API request by using a comma-separated list of datafeeds or a wildcard expression. You can close all datafeeds by using `_all` or by specifying `*` as the identifier. :param allow_no_match: Refer to the description for the `allow_no_match` query parameter. :param force: Refer to the description for the `force` query parameter. :param timeout: Refer to the description for the `timeout` query parameter. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_stop' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_no_match is not None: __body["allow_no_match"] = allow_no_match if force is not None: __body["force"] = force if timeout is not None: __body["timeout"] = timeout if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.stop_datafeed", path_parts=__path_parts, ) @_rewrite_parameters() def stop_trained_model_deployment( self, *, model_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop a trained model deployment.
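
Example (sketch; ``client`` and the model ID are assumptions)::

    client.ml.stop_trained_model_deployment(model_id="my-nlp-model")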

``_ :param model_id: The unique identifier of the trained model. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no deployments that match; contains the `_all` string or no identifiers and there are no matches; or contains wildcard expressions and there are only partial matches. By default, it returns an empty array when there are no matches and the subset of results when there are partial matches. If `false`, the request returns a 404 status code when there are no matches or only partial matches. :param force: Forcefully stops the deployment, even if it is used by ingest pipelines. You can't use these pipelines until you restart the model deployment. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_stop' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.stop_trained_model_deployment", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_lazy_start", "description", "max_num_threads", "model_memory_limit", ), ) def update_data_frame_analytics( self, *, id: str, allow_lazy_start: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_num_threads: t.Optional[int] = None, model_memory_limit: t.Optional[str] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a data frame analytics job.
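
Example (sketch; ``client``, the job identifier, and the new limit are illustrative)::

    client.ml.update_data_frame_analytics(
        id="my-analytics-job",
        model_memory_limit="200mb",
    )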

``_ :param id: Identifier for the data frame analytics job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param allow_lazy_start: Specifies whether this job can start when there is insufficient machine learning node capacity for it to be immediately assigned to a node. :param description: A description of the job. :param max_num_threads: The maximum number of threads to be used by the analysis. Using more threads may decrease the time necessary to complete the analysis at the cost of using more CPU. Note that the process may use additional threads for operational functionality other than the analysis itself. :param model_memory_limit: The approximate maximum amount of memory resources that are permitted for analytical processing. If your `elasticsearch.yml` file contains an `xpack.ml.max_model_memory_limit` setting, an error occurs when you try to create data frame analytics jobs that have `model_memory_limit` values greater than that setting. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_ml/data_frame/analytics/{__path_parts["id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_lazy_start is not None: __body["allow_lazy_start"] = allow_lazy_start if description is not None: __body["description"] = description if max_num_threads is not None: __body["max_num_threads"] = max_num_threads if model_memory_limit is not None: __body["model_memory_limit"] = model_memory_limit __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_data_frame_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "chunking_config", "delayed_data_check_config", "frequency", "indexes", "indices", "indices_options", "job_id", "max_empty_searches", "query", "query_delay", "runtime_mappings", "script_fields", "scroll_size", ), ) def update_datafeed( self, *, datafeed_id: str, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, allow_no_indices: t.Optional[bool] = None, chunking_config: t.Optional[t.Mapping[str, t.Any]] = None, delayed_data_check_config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, ignore_throttled: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, indexes: t.Optional[t.Sequence[str]] = None, indices: t.Optional[t.Sequence[str]] = None, indices_options: t.Optional[t.Mapping[str, t.Any]] = None, job_id: t.Optional[str] = None, max_empty_searches: t.Optional[int] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, 
query_delay: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, script_fields: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, scroll_size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a datafeed. You must stop and start the datafeed for the changes to be applied. When Elasticsearch security features are enabled, your datafeed remembers which roles the user who updated it had at the time of the update and runs the query using those same roles. If you provide secondary authorization headers, those credentials are used instead.
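
Example (sketch; ``client`` and the datafeed ID are hypothetical, and the query shown is an arbitrary illustrative term query)::

    client.ml.update_datafeed(
        datafeed_id="datafeed-my-job",
        query={"term": {"geo.src": "US"}},
        scroll_size=1000,
    )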

``_ :param datafeed_id: A numerical character string that uniquely identifies the datafeed. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters. :param aggregations: If set, the datafeed performs aggregation searches. Support for aggregations is limited and should be used only with low cardinality data. :param allow_no_indices: If `true`, wildcard indices expressions that resolve into no concrete indices are ignored. This includes the `_all` string or when no indices are specified. :param chunking_config: Datafeeds might search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load on Elasticsearch is managed. Chunking configuration controls how the size of these time chunks are calculated; it is an advanced configuration option. :param delayed_data_check_config: Specifies whether the datafeed checks for missing data and the size of the window. The datafeed can optionally search over indices that have already been read in an effort to determine whether any data has subsequently been added to the index. If missing data is found, it is a good indication that the `query_delay` is set too low and the data is being indexed after the datafeed has passed that moment in time. This check runs only on real-time datafeeds. :param expand_wildcards: Type of index that wildcard patterns can match. If the request can target data streams, this argument determines whether wildcard expressions match hidden data streams. Supports comma-separated values. :param frequency: The interval at which scheduled queries are made while the datafeed runs in real time. The default value is either the bucket span for short bucket spans, or, for longer bucket spans, a sensible fraction of the bucket span. When `frequency` is shorter than the bucket span, interim results for the last (partial) bucket are written then eventually overwritten by the full bucket results. If the datafeed uses aggregations, this value must be divisible by the interval of the date histogram aggregation. :param ignore_throttled: If `true`, concrete, expanded or aliased indices are ignored when frozen. :param ignore_unavailable: If `true`, unavailable indices (missing or closed) are ignored. :param indexes: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. :param indices: An array of index names. Wildcards are supported. If any of the indices are in remote clusters, the machine learning nodes must have the `remote_cluster_client` role. :param indices_options: Specifies index expansion options that are used during search. :param job_id: :param max_empty_searches: If a real-time datafeed has never seen any data (including during any initial training period), it automatically stops and closes the associated job after this many real-time searches return no documents. In other words, it stops after `frequency` times `max_empty_searches` of real-time operation. If not set, a datafeed with no end time that sees no data remains started until it is explicitly stopped. By default, it is not set. :param query: The Elasticsearch query domain-specific language (DSL). This value corresponds to the query object in an Elasticsearch search POST body. All the options that are supported by Elasticsearch can be used, as this object is passed verbatim to Elasticsearch. 
Note that if you change the query, the analyzed data is also changed. Therefore, the time required to learn might be long and the understandability of the results is unpredictable. If you want to make significant changes to the source data, it is recommended that you clone the job and datafeed and make the amendments in the clone. Let both run in parallel and close one when you are satisfied with the results of the job. :param query_delay: The number of seconds behind real time that data is queried. For example, if data from 10:04 a.m. might not be searchable in Elasticsearch until 10:06 a.m., set this property to 120 seconds. The default value is randomly selected between `60s` and `120s`. This randomness improves the query performance when there are multiple jobs running on the same node. :param runtime_mappings: Specifies runtime fields for the datafeed search. :param script_fields: Specifies scripts that evaluate custom expressions and returns script fields to the datafeed. The detector configuration objects in a job can contain functions that use these script fields. :param scroll_size: The size parameter that is used in Elasticsearch searches when the datafeed does not use aggregations. The maximum value is the value of `index.max_result_window`. """ if datafeed_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'datafeed_id'") __path_parts: t.Dict[str, str] = {"datafeed_id": _quote(datafeed_id)} __path = f'/_ml/datafeeds/{__path_parts["datafeed_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_throttled is not None: __query["ignore_throttled"] = ignore_throttled if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty if not __body: if aggregations is not None: __body["aggregations"] = aggregations if chunking_config is not None: __body["chunking_config"] = chunking_config if delayed_data_check_config is not None: __body["delayed_data_check_config"] = delayed_data_check_config if frequency is not None: __body["frequency"] = frequency if indexes is not None: __body["indexes"] = indexes if indices is not None: __body["indices"] = indices if indices_options is not None: __body["indices_options"] = indices_options if job_id is not None: __body["job_id"] = job_id if max_empty_searches is not None: __body["max_empty_searches"] = max_empty_searches if query is not None: __body["query"] = query if query_delay is not None: __body["query_delay"] = query_delay if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if script_fields is not None: __body["script_fields"] = script_fields if scroll_size is not None: __body["scroll_size"] = scroll_size __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_datafeed", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("add_items", "description", "remove_items"), ) def update_filter( self, *, filter_id: str, add_items: t.Optional[t.Sequence[str]] = None, 
description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, remove_items: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a filter. Updates the description of a filter, adds items, or removes items from the list.
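
Example (sketch; ``client``, the filter ID, and the items are assumptions)::

    client.ml.update_filter(
        filter_id="safe_domains",
        add_items=["*.example.com"],
        description="Allowed domains",
    )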

``_ :param filter_id: A string that uniquely identifies a filter. :param add_items: The items to add to the filter. :param description: A description for the filter. :param remove_items: The items to remove from the filter. """ if filter_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'filter_id'") __path_parts: t.Dict[str, str] = {"filter_id": _quote(filter_id)} __path = f'/_ml/filters/{__path_parts["filter_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if add_items is not None: __body["add_items"] = add_items if description is not None: __body["description"] = description if remove_items is not None: __body["remove_items"] = remove_items __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_filter", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_lazy_open", "analysis_limits", "background_persist_interval", "categorization_filters", "custom_settings", "daily_model_snapshot_retention_after_days", "description", "detectors", "groups", "model_plot_config", "model_prune_window", "model_snapshot_retention_days", "per_partition_categorization", "renormalization_window_days", "results_retention_days", ), ) def update_job( self, *, job_id: str, allow_lazy_open: t.Optional[bool] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, background_persist_interval: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, categorization_filters: t.Optional[t.Sequence[str]] = None, custom_settings: t.Optional[t.Mapping[str, t.Any]] = None, daily_model_snapshot_retention_after_days: t.Optional[int] = None, description: t.Optional[str] = None, detectors: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[t.Sequence[str]] = None, human: t.Optional[bool] = None, model_plot_config: t.Optional[t.Mapping[str, t.Any]] = None, model_prune_window: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, model_snapshot_retention_days: t.Optional[int] = None, per_partition_categorization: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, renormalization_window_days: t.Optional[int] = None, results_retention_days: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update an anomaly detection job. Updates certain properties of an anomaly detection job.
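
Example (sketch; ``client``, the job ID, and the group name are assumptions)::

    client.ml.update_job(
        job_id="my-job",
        description="Revised job description",
        groups=["web-logs"],
    )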

``_ :param job_id: Identifier for the job. :param allow_lazy_open: Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node. If `false` and a machine learning node with capacity to run the job cannot immediately be found, the open anomaly detection jobs API returns an error. However, this is also subject to the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting. If this option is set to `true`, the open anomaly detection jobs API does not return an error and the job waits in the opening state until sufficient machine learning node capacity is available. :param analysis_limits: :param background_persist_interval: Advanced configuration option. The time between each periodic persistence of the model. The default value is a randomized value between 3 to 4 hours, which avoids all jobs persisting at exactly the same time. The smallest allowed value is 1 hour. For very large models (several GB), persistence could take 10-20 minutes, so do not set the value too low. If the job is open when you make the update, you must stop the datafeed, close the job, then reopen the job and restart the datafeed for the changes to take effect. :param categorization_filters: :param custom_settings: Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information as shown in Adding custom URLs to machine learning results. :param daily_model_snapshot_retention_after_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies a period of time (in days) after which only the first snapshot per day is retained. This period is relative to the timestamp of the most recent snapshot for this job. Valid values range from 0 to `model_snapshot_retention_days`. For jobs created before version 7.8.0, the default value matches `model_snapshot_retention_days`. :param description: A description of the job. :param detectors: An array of detector update objects. :param groups: A list of job groups. A job can belong to no groups or many. :param model_plot_config: :param model_prune_window: :param model_snapshot_retention_days: Advanced configuration option, which affects the automatic removal of old model snapshots for this job. It specifies the maximum period of time (in days) that snapshots are retained. This period is relative to the timestamp of the most recent snapshot for this job. :param per_partition_categorization: Settings related to how categorization interacts with partition fields. :param renormalization_window_days: Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen. :param results_retention_days: Advanced configuration option. The period of time (in days) that results are retained. Age is calculated relative to the timestamp of the latest bucket result. If this property has a non-null value, once per day at 00:30 (server time), results that are the specified number of days older than the latest bucket result are deleted from Elasticsearch. The default value is null, which means all results are retained. 
""" if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") __path_parts: t.Dict[str, str] = {"job_id": _quote(job_id)} __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_lazy_open is not None: __body["allow_lazy_open"] = allow_lazy_open if analysis_limits is not None: __body["analysis_limits"] = analysis_limits if background_persist_interval is not None: __body["background_persist_interval"] = background_persist_interval if categorization_filters is not None: __body["categorization_filters"] = categorization_filters if custom_settings is not None: __body["custom_settings"] = custom_settings if daily_model_snapshot_retention_after_days is not None: __body["daily_model_snapshot_retention_after_days"] = ( daily_model_snapshot_retention_after_days ) if description is not None: __body["description"] = description if detectors is not None: __body["detectors"] = detectors if groups is not None: __body["groups"] = groups if model_plot_config is not None: __body["model_plot_config"] = model_plot_config if model_prune_window is not None: __body["model_prune_window"] = model_prune_window if model_snapshot_retention_days is not None: __body["model_snapshot_retention_days"] = model_snapshot_retention_days if per_partition_categorization is not None: __body["per_partition_categorization"] = per_partition_categorization if renormalization_window_days is not None: __body["renormalization_window_days"] = renormalization_window_days if results_retention_days is not None: __body["results_retention_days"] = results_retention_days __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("description", "retain"), ) def update_model_snapshot( self, *, job_id: str, snapshot_id: str, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, retain: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a snapshot. Updates certain properties of a snapshot.
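
Example (sketch; ``client``, the job ID, and the snapshot ID are hypothetical)::

    client.ml.update_model_snapshot(
        job_id="my-job",
        snapshot_id="1575402237",
        retain=True,
    )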

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: Identifier for the model snapshot. :param description: A description of the model snapshot. :param retain: If `true`, this snapshot will not be deleted during automatic cleanup of snapshots older than `model_snapshot_retention_days`. However, this snapshot will be deleted when the job is deleted. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if description is not None: __body["description"] = description if retain is not None: __body["retain"] = retain __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_model_snapshot", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("adaptive_allocations", "number_of_allocations"), ) def update_trained_model_deployment( self, *, model_id: str, adaptive_allocations: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, number_of_allocations: t.Optional[int] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a trained model deployment.
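
Example (sketch; ``client`` and the model ID are assumptions)::

    client.ml.update_trained_model_deployment(
        model_id="my-nlp-model",
        number_of_allocations=4,
    )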

``_ :param model_id: The unique identifier of the trained model. Currently, only PyTorch models are supported. :param adaptive_allocations: Adaptive allocations configuration. When enabled, the number of allocations is set based on the current load. If adaptive_allocations is enabled, do not set the number of allocations manually. :param number_of_allocations: The number of model allocations on each node where the model is deployed. All allocations on a node share the same copy of the model in memory but use a separate set of threads to evaluate the model. Increasing this value generally increases the throughput. If this setting is greater than the number of hardware threads it will automatically be changed to a value less than the number of hardware threads. If adaptive_allocations is enabled, do not set this value, because it’s automatically set. """ if model_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'model_id'") __path_parts: t.Dict[str, str] = {"model_id": _quote(model_id)} __path = f'/_ml/trained_models/{__path_parts["model_id"]}/deployment/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if adaptive_allocations is not None: __body["adaptive_allocations"] = adaptive_allocations if number_of_allocations is not None: __body["number_of_allocations"] = number_of_allocations if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.update_trained_model_deployment", path_parts=__path_parts, ) @_rewrite_parameters() def upgrade_job_snapshot( self, *, job_id: str, snapshot_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Upgrade a snapshot. Upgrade an anomaly detection model snapshot to the latest major version. Over time, older snapshot formats are deprecated and removed. Anomaly detection jobs support only snapshots that are from the current or previous major version. This API provides a means to upgrade a snapshot to the current major version. This aids in preparing the cluster for an upgrade to the next major version. Only one snapshot per anomaly detection job can be upgraded at a time and the upgraded snapshot cannot be the current snapshot of the anomaly detection job.
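
Example (sketch; ``client``, the job ID, and the snapshot ID are hypothetical)::

    client.ml.upgrade_job_snapshot(
        job_id="my-job",
        snapshot_id="1575402237",
        wait_for_completion=True,
    )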

``_ :param job_id: Identifier for the anomaly detection job. :param snapshot_id: A numerical character string that uniquely identifies the model snapshot. :param timeout: Controls the time to wait for the request to complete. :param wait_for_completion: When true, the API won’t respond until the upgrade is complete. Otherwise, it responds as soon as the upgrade task is assigned to a node. """ if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'job_id'") if snapshot_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot_id'") __path_parts: t.Dict[str, str] = { "job_id": _quote(job_id), "snapshot_id": _quote(snapshot_id), } __path = f'/_ml/anomaly_detectors/{__path_parts["job_id"]}/model_snapshots/{__path_parts["snapshot_id"]}/_upgrade' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="ml.upgrade_job_snapshot", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "analysis_config", "analysis_limits", "data_description", "description", "job_id", "model_plot", "model_snapshot_id", "model_snapshot_retention_days", "results_index_name", ), ) def validate( self, *, analysis_config: t.Optional[t.Mapping[str, t.Any]] = None, analysis_limits: t.Optional[t.Mapping[str, t.Any]] = None, data_description: t.Optional[t.Mapping[str, t.Any]] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, job_id: t.Optional[str] = None, model_plot: t.Optional[t.Mapping[str, t.Any]] = None, model_snapshot_id: t.Optional[str] = None, model_snapshot_retention_days: t.Optional[int] = None, pretty: t.Optional[bool] = None, results_index_name: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Validate an anomaly detection job.
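
Example (sketch; ``client`` is an assumed ``Elasticsearch`` instance and the job configuration shown is purely illustrative)::

    client.ml.validate(
        analysis_config={
            "bucket_span": "15m",
            "detectors": [{"function": "mean", "field_name": "responsetime"}],
        },
        data_description={"time_field": "timestamp"},
    )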

``_ :param analysis_config: :param analysis_limits: :param data_description: :param description: :param job_id: :param model_plot: :param model_snapshot_id: :param model_snapshot_retention_days: :param results_index_name: """ __path_parts: t.Dict[str, str] = {} __path = "/_ml/anomaly_detectors/_validate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if analysis_config is not None: __body["analysis_config"] = analysis_config if analysis_limits is not None: __body["analysis_limits"] = analysis_limits if data_description is not None: __body["data_description"] = data_description if description is not None: __body["description"] = description if job_id is not None: __body["job_id"] = job_id if model_plot is not None: __body["model_plot"] = model_plot if model_snapshot_id is not None: __body["model_snapshot_id"] = model_snapshot_id if model_snapshot_retention_days is not None: __body["model_snapshot_retention_days"] = model_snapshot_retention_days if results_index_name is not None: __body["results_index_name"] = results_index_name __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.validate", path_parts=__path_parts, ) @_rewrite_parameters( body_name="detector", ) def validate_detector( self, *, detector: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Validate an anomaly detection job.
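
Example (sketch; ``client`` is assumed and the detector configuration is illustrative)::

    client.ml.validate_detector(
        detector={"function": "mean", "field_name": "responsetime"},
    )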

``_ :param detector: """ if detector is None and body is None: raise ValueError( "Empty value passed for parameters 'detector' and 'body', one of them should be set." ) elif detector is not None and body is not None: raise ValueError("Cannot set both 'detector' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_ml/anomaly_detectors/_validate/detector" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = detector if detector is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="ml.validate_detector", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/monitoring.py000066400000000000000000000073751506101734100256270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class MonitoringClient(NamespacedClient): @_rewrite_parameters( body_name="operations", ) def bulk( self, *, interval: t.Union[str, t.Literal[-1], t.Literal[0]], operations: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, system_api_version: str, system_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Send monitoring data. This API is used by the monitoring features to send monitoring data.
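
Example (a rough sketch only; this API is intended for internal monitoring agents, ``client`` is an assumed ``Elasticsearch`` instance, and the NDJSON-style action/document pair shown is illustrative rather than a documented payload shape)::

    client.monitoring.bulk(
        system_id="kibana",
        system_api_version="7",
        interval="10s",
        operations=[
            {"index": {"_index": ".monitoring-kibana"}},
            {"kibana_stats": {"status": "green"}},
        ],
    )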

``_ :param interval: Collection interval (e.g., '10s' or '10000ms') of the payload :param operations: :param system_api_version: :param system_id: Identifier of the monitored system """ if interval is None: raise ValueError("Empty value passed for parameter 'interval'") if operations is None and body is None: raise ValueError( "Empty value passed for parameters 'operations' and 'body', one of them should be set." ) elif operations is not None and body is not None: raise ValueError("Cannot set both 'operations' and 'body'") if system_api_version is None: raise ValueError("Empty value passed for parameter 'system_api_version'") if system_id is None: raise ValueError("Empty value passed for parameter 'system_id'") __path_parts: t.Dict[str, str] = {} __path = "/_monitoring/bulk" __query: t.Dict[str, t.Any] = {} if interval is not None: __query["interval"] = interval if system_api_version is not None: __query["system_api_version"] = system_api_version if system_id is not None: __query["system_id"] = system_id if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = operations if operations is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="monitoring.bulk", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/nodes.py000066400000000000000000000566121506101734100245500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse, TextApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class NodesClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def clear_repositories_metering_archive( self, *, node_id: t.Union[str, t.Sequence[str]], max_archive_version: int, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the archived repositories metering. Clear the archived repositories metering information in the cluster.

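For example, a minimal sketch assuming a connected client (the node name and archive version are illustrative):

          client.nodes.clear_repositories_metering_archive(
              node_id="node-1",
              max_archive_version=1,
          )
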
``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. :param max_archive_version: Specifies the maximum `archive_version` to be cleared from the archive. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") if max_archive_version in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'max_archive_version'") __path_parts: t.Dict[str, str] = { "node_id": _quote(node_id), "max_archive_version": _quote(max_archive_version), } __path = f'/_nodes/{__path_parts["node_id"]}/_repositories_metering/{__path_parts["max_archive_version"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="nodes.clear_repositories_metering_archive", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_repositories_metering_info( self, *, node_id: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cluster repositories metering. Get repositories metering information for a cluster. This API exposes monotonically non-decreasing counters, and clients are expected to durably store the information needed to compute aggregations over a period of time. Additionally, the information exposed by this API is volatile, meaning that it will not be present after node restarts.

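A minimal sketch (the node ID is illustrative):

          info = client.nodes.get_repositories_metering_info(node_id="node-1")
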
``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") __path_parts: t.Dict[str, str] = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/_repositories_metering' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.get_repositories_metering_info", path_parts=__path_parts, ) @_rewrite_parameters() def hot_threads( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_idle_threads: t.Optional[bool] = None, interval: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, snapshots: t.Optional[int] = None, sort: t.Optional[ t.Union[str, t.Literal["block", "cpu", "gpu", "mem", "wait"]] ] = None, threads: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, type: t.Optional[ t.Union[str, t.Literal["block", "cpu", "gpu", "mem", "wait"]] ] = None, ) -> TextApiResponse: """ .. raw:: html

Get the hot threads for nodes. Get a breakdown of the hot threads on each selected node in the cluster. The output is plain text with a breakdown of the top hot threads for each node.

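Because the output is plain text, the method returns a TextApiResponse rather than a JSON object. A sketch (parameter values are illustrative):

          report = client.nodes.hot_threads(threads=5, interval="500ms")
          print(report)
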
``_ :param node_id: List of node IDs or names used to limit returned information. :param ignore_idle_threads: If true, known idle threads (e.g. waiting in a socket select, or to get a task from an empty queue) are filtered out. :param interval: The interval to do the second sampling of threads. :param snapshots: Number of samples of thread stacktrace. :param sort: The sort order for 'cpu' type (default: total) :param threads: Specifies the number of hot threads to provide information for. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param type: The type to sample. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/hot_threads' else: __path_parts = {} __path = "/_nodes/hot_threads" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_idle_threads is not None: __query["ignore_idle_threads"] = ignore_idle_threads if interval is not None: __query["interval"] = interval if pretty is not None: __query["pretty"] = pretty if snapshots is not None: __query["snapshots"] = snapshots if sort is not None: __query["sort"] = sort if threads is not None: __query["threads"] = threads if timeout is not None: __query["timeout"] = timeout if type is not None: __query["type"] = type __headers = {"accept": "text/plain"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.hot_threads", path_parts=__path_parts, ) @_rewrite_parameters() def info( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, flat_settings: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get node information.

By default, the API returns all attributes and core settings for cluster nodes.

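For example, limiting the output to the http and ingest metrics of a single node (the node name is illustrative):

          info = client.nodes.info(node_id="node-1", metric="http,ingest")
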
``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. :param metric: Limits the information returned to the specific metrics. Supports a comma-separated list, such as http,ingest. :param flat_settings: If true, returns settings in flat format. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id), "metric": _quote(metric)} __path = f'/_nodes/{__path_parts["node_id"]}/{__path_parts["metric"]}' elif node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_nodes/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_nodes" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if flat_settings is not None: __query["flat_settings"] = flat_settings if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.info", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("secure_settings_password",), ) def reload_secure_settings( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, secure_settings_password: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reload the keystore on nodes in the cluster.

Secure settings are stored in an on-disk keystore. Some of these settings are reloadable: you can change them on disk and reload them without restarting any nodes in the cluster. When you have updated reloadable secure settings in your keystore, you can use this API to reload those settings on each node.

When the Elasticsearch keystore is password protected and not simply obfuscated, you must provide the password for the keystore when you reload the secure settings. Reloading the settings for the whole cluster assumes that the keystores for all nodes are protected with the same password; this method is allowed only when inter-node communications are encrypted. Alternatively, you can reload the secure settings on each node by locally accessing the API and passing the node-specific Elasticsearch keystore password.

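A minimal sketch (the password is illustrative; omit it when the keystore is merely obfuscated):

          client.nodes.reload_secure_settings(
              secure_settings_password="keystore-password",
          )
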
``_ :param node_id: The names of particular nodes in the cluster to target. :param secure_settings_password: The password for the Elasticsearch keystore. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/reload_secure_settings' else: __path_parts = {} __path = "/_nodes/reload_secure_settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if secure_settings_password is not None: __body["secure_settings_password"] = secure_settings_password if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="nodes.reload_secure_settings", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, index_metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, completion_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, fielddata_fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, fields: t.Optional[t.Union[str, t.Sequence[str]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, groups: t.Optional[bool] = None, human: t.Optional[bool] = None, include_segment_file_sizes: t.Optional[bool] = None, include_unloaded_segments: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, types: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get node statistics. Get statistics for nodes in a cluster. By default, all stats are returned. You can limit the returned information by using metrics.

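For example, fetching only JVM and operating-system statistics (the metric names are illustrative):

          stats = client.nodes.stats(metric="jvm,os")
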
``_ :param node_id: Comma-separated list of node IDs or names used to limit returned information. :param metric: Limit the information returned to the specified metrics :param index_metric: Limit the information returned for indices metric to the specific index metrics. It can be used only if indices (or all) metric is specified. :param completion_fields: Comma-separated list or wildcard expressions of fields to include in fielddata and suggest statistics. :param fielddata_fields: Comma-separated list or wildcard expressions of fields to include in fielddata statistics. :param fields: Comma-separated list or wildcard expressions of fields to include in the statistics. :param groups: Comma-separated list of search groups to include in the search statistics. :param include_segment_file_sizes: If true, the call reports the aggregated disk usage of each one of the Lucene index files (only applies if segment stats are requested). :param include_unloaded_segments: If `true`, the response includes information from segments that are not loaded into memory. :param level: Indicates whether statistics are aggregated at the cluster, index, or shard level. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param types: A comma-separated list of document types for the indexing index metric. """ __path_parts: t.Dict[str, str] if ( node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH and index_metric not in SKIP_IN_PATH ): __path_parts = { "node_id": _quote(node_id), "metric": _quote(metric), "index_metric": _quote(index_metric), } __path = f'/_nodes/{__path_parts["node_id"]}/stats/{__path_parts["metric"]}/{__path_parts["index_metric"]}' elif node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id), "metric": _quote(metric)} __path = f'/_nodes/{__path_parts["node_id"]}/stats/{__path_parts["metric"]}' elif metric not in SKIP_IN_PATH and index_metric not in SKIP_IN_PATH: __path_parts = { "metric": _quote(metric), "index_metric": _quote(index_metric), } __path = ( f'/_nodes/stats/{__path_parts["metric"]}/{__path_parts["index_metric"]}' ) elif node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/stats' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_nodes/stats/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_nodes/stats" __query: t.Dict[str, t.Any] = {} if completion_fields is not None: __query["completion_fields"] = completion_fields if error_trace is not None: __query["error_trace"] = error_trace if fielddata_fields is not None: __query["fielddata_fields"] = fielddata_fields if fields is not None: __query["fields"] = fields if filter_path is not None: __query["filter_path"] = filter_path if groups is not None: __query["groups"] = groups if human is not None: __query["human"] = human if include_segment_file_sizes is not None: __query["include_segment_file_sizes"] = include_segment_file_sizes if include_unloaded_segments is not None: __query["include_unloaded_segments"] = include_unloaded_segments if level is not None: __query["level"] = level if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if types is not None: __query["types"] = types __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, 
endpoint_id="nodes.stats", path_parts=__path_parts, ) @_rewrite_parameters() def usage( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, metric: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get feature usage information.

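For example, restricting the report to REST action usage on the local node:

          usage = client.nodes.usage(node_id="_local", metric="rest_actions")
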
``_ :param node_id: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes :param metric: Limits the information returned to the specific metrics. A comma-separated list of the following options: `_all`, `rest_actions`. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH and metric not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id), "metric": _quote(metric)} __path = f'/_nodes/{__path_parts["node_id"]}/usage/{__path_parts["metric"]}' elif node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/usage' elif metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_nodes/usage/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_nodes/usage" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="nodes.usage", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/query_rules.py000066400000000000000000000456071506101734100260210ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class QueryRulesClient(NamespacedClient): @_rewrite_parameters() def delete_rule( self, *, ruleset_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a query rule. Delete a query rule within a query ruleset. This is a destructive action that is only recoverable by re-adding the same rule with the create or update query rule API.

``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to delete :param rule_id: The unique identifier of the query rule within the specified ruleset to delete """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "ruleset_id": _quote(ruleset_id), "rule_id": _quote(rule_id), } __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_rule/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="query_rules.delete_rule", path_parts=__path_parts, ) @_rewrite_parameters() def delete_ruleset( self, *, ruleset_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a query ruleset. Remove a query ruleset and its associated data. This is a destructive action that is not recoverable.

``_ :param ruleset_id: The unique identifier of the query ruleset to delete """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="query_rules.delete_ruleset", path_parts=__path_parts, ) @_rewrite_parameters() def get_rule( self, *, ruleset_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a query rule. Get details about a query rule within a query ruleset.

``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to retrieve :param rule_id: The unique identifier of the query rule within the specified ruleset to retrieve """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "ruleset_id": _quote(ruleset_id), "rule_id": _quote(rule_id), } __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_rule/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="query_rules.get_rule", path_parts=__path_parts, ) @_rewrite_parameters() def get_ruleset( self, *, ruleset_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a query ruleset. Get details about a query ruleset.

``_ :param ruleset_id: The unique identifier of the query ruleset """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="query_rules.get_ruleset", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def list_rulesets( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all query rulesets. Get summarized information about the query rulesets.

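A paginated listing sketch (the offsets are illustrative):

          page = client.query_rules.list_rulesets(from_=0, size=20)
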
``_ :param from_: The offset from the first result to fetch. :param size: The maximum number of results to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_query_rules" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="query_rules.list_rulesets", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("actions", "criteria", "type", "priority"), ) def put_rule( self, *, ruleset_id: str, rule_id: str, actions: t.Optional[t.Mapping[str, t.Any]] = None, criteria: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, type: t.Optional[t.Union[str, t.Literal["exclude", "pinned"]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, priority: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a query rule. Create or update a query rule within a query ruleset.

IMPORTANT: Due to limitations within pinned queries, you can only pin documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned, in the order they are specified in the ruleset.

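A hedged sketch of a pinned rule (the ruleset, rule, criterion, and document IDs are all illustrative):

          client.query_rules.put_rule(
              ruleset_id="my-ruleset",
              rule_id="my-rule",
              type="pinned",
              criteria=[{"type": "exact", "metadata": "user_query", "values": ["pugs"]}],
              actions={"ids": ["id1", "id2"]},
          )
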
``_ :param ruleset_id: The unique identifier of the query ruleset containing the rule to be created or updated. :param rule_id: The unique identifier of the query rule within the specified ruleset to be created or updated. :param actions: The actions to take when the rule is matched. The format of this action depends on the rule type. :param criteria: The criteria that must be met for the rule to be applied. If multiple criteria are specified for a rule, all criteria must be met for the rule to be applied. :param type: The type of rule. :param priority: """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") if actions is None and body is None: raise ValueError("Empty value passed for parameter 'actions'") if criteria is None and body is None: raise ValueError("Empty value passed for parameter 'criteria'") if type is None and body is None: raise ValueError("Empty value passed for parameter 'type'") __path_parts: t.Dict[str, str] = { "ruleset_id": _quote(ruleset_id), "rule_id": _quote(rule_id), } __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_rule/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if actions is not None: __body["actions"] = actions if criteria is not None: __body["criteria"] = criteria if type is not None: __body["type"] = type if priority is not None: __body["priority"] = priority __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="query_rules.put_rule", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("rules",), ) def put_ruleset( self, *, ruleset_id: str, rules: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a query ruleset. There is a limit of 100 rules per ruleset. This limit can be increased by using the xpack.applications.rules.max_rules_per_ruleset cluster setting.

IMPORTANT: Due to limitations within pinned queries, you can only select documents using ids or docs, but cannot use both in a single rule. It is advised to use one or the other in query rulesets to avoid errors. Additionally, pinned queries have a maximum limit of 100 pinned hits. If multiple matching rules pin more than 100 documents, only the first 100 documents are pinned, in the order they are specified in the ruleset.

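A sketch that creates a one-rule ruleset (all identifiers and values are illustrative):

          client.query_rules.put_ruleset(
              ruleset_id="my-ruleset",
              rules=[
                  {
                      "rule_id": "my-rule",
                      "type": "pinned",
                      "criteria": [{"type": "exact", "metadata": "user_query", "values": ["pugs"]}],
                      "actions": {"ids": ["id1"]},
                  }
              ],
          )
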
``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated. :param rules: """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if rules is None and body is None: raise ValueError("Empty value passed for parameter 'rules'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if rules is not None: __body["rules"] = rules __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="query_rules.put_ruleset", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("match_criteria",), ) def test( self, *, ruleset_id: str, match_criteria: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Test a query ruleset. Evaluate match criteria against a query ruleset to identify the rules that would match that criteria.

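For example, checking which rules would fire for a given query (the keys of match_criteria must match the criteria.metadata values defined in the ruleset; the values are illustrative):

          result = client.query_rules.test(
              ruleset_id="my-ruleset",
              match_criteria={"user_query": "pugs"},
          )
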
``_ :param ruleset_id: The unique identifier of the query ruleset to be created or updated :param match_criteria: The match criteria to apply to rules in the given query ruleset. Match criteria should match the keys defined in the `criteria.metadata` field of the rule. """ if ruleset_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ruleset_id'") if match_criteria is None and body is None: raise ValueError("Empty value passed for parameter 'match_criteria'") __path_parts: t.Dict[str, str] = {"ruleset_id": _quote(ruleset_id)} __path = f'/_query_rules/{__path_parts["ruleset_id"]}/_test' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if match_criteria is not None: __body["match_criteria"] = match_criteria __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="query_rules.test", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/rollup.py000066400000000000000000000665011506101734100247530ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class RollupClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def delete_job( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a rollup job.

A job must be stopped before it can be deleted. If you attempt to delete a started job, an error occurs. Similarly, if you attempt to delete a nonexistent job, an exception occurs.

IMPORTANT: When you delete a job, you remove only the process that is actively monitoring and rolling up data. The API does not delete any previously rolled up data. This is by design; a user may wish to roll up a static data set. Because the data set is static, after it has been fully rolled up there is no need to keep the indexing rollup job around (as there will be no new data). Thus the job can be deleted, leaving behind the rolled up data for analysis. If you wish to also remove the rollup data and the rollup index contains the data for only a single job, you can delete the whole rollup index. If the rollup index stores data from several jobs, you must issue a delete-by-query that targets the rollup job's identifier in the rollup index. For example:

POST my_rollup_index/_delete_by_query
          {
            "query": {
              "term": {
                "_rollup.id": "the_rollup_job_id"
              }
            }
          }
          
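The job deletion itself is a single call (the job ID is illustrative):

          client.rollup.delete_job(id="sensor")
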
``_ :param id: Identifier for the job. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="rollup.delete_job", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_jobs( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get rollup job information. Get the configuration, stats, and status of rollup jobs.

NOTE: This API returns only active (both STARTED and STOPPED) jobs. If a job was created, ran for a while, then was deleted, the API does not return any details about it. For details about a historical rollup job, the rollup capabilities API may be more useful.

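For example, fetching a single job by ID, or all jobs by omitting it (the ID is illustrative):

          job = client.rollup.get_jobs(id="sensor")
          all_jobs = client.rollup.get_jobs()
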
``_ :param id: Identifier for the rollup job. If it is `_all` or omitted, the API returns all rollup jobs. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}' else: __path_parts = {} __path = "/_rollup/job" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="rollup.get_jobs", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_rollup_caps( self, *, id: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the rollup job capabilities. Get the capabilities of any rollup jobs that have been configured for a specific index or index pattern.

This API is useful because a rollup job is often configured to roll up only a subset of fields from the source index. Furthermore, only certain aggregations can be configured for various fields, leading to a limited subset of functionality depending on that configuration. This API enables you to inspect an index and determine:

  1. Does this index have associated rollup data somewhere in the cluster?
  2. If yes to the first question, what fields were rolled up, what aggregations can be performed, and where does the data live?
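
For example, a sketch that inspects the rollup capabilities attached to an index pattern (the pattern is illustrative):

          caps = client.rollup.get_rollup_caps(id="sensor-*")
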
``_ :param id: Index, indices or index-pattern to return rollup capabilities for. `_all` may be used to fetch rollup capabilities from all jobs. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_rollup/data/{__path_parts["id"]}' else: __path_parts = {} __path = "/_rollup/data" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="rollup.get_rollup_caps", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_rollup_index_caps( self, *, index: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the rollup index capabilities. Get the rollup capabilities of all jobs inside of a rollup index. A single rollup index may store the data for multiple rollup jobs and may have a variety of capabilities depending on those jobs. This API enables you to determine:

  • What jobs are stored in an index (or indices specified via a pattern)?
  • What target indices were rolled up, what fields were used in those rollups, and what aggregations can be performed on each job?
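
A minimal sketch (the rollup index name is illustrative):

          caps = client.rollup.get_rollup_index_caps(index="sensor_rollup")
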
``_ :param index: Data stream or index to check for rollup capabilities. Wildcard (`*`) expressions are supported. """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_rollup/data' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="rollup.get_rollup_index_caps", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "cron", "groups", "index_pattern", "page_size", "rollup_index", "headers", "metrics", "timeout", ), ignore_deprecated_options={"headers"}, ) @_stability_warning(Stability.EXPERIMENTAL) def put_job( self, *, id: str, cron: t.Optional[str] = None, groups: t.Optional[t.Mapping[str, t.Any]] = None, index_pattern: t.Optional[str] = None, page_size: t.Optional[int] = None, rollup_index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, headers: t.Optional[t.Mapping[str, t.Union[str, t.Sequence[str]]]] = None, human: t.Optional[bool] = None, metrics: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a rollup job.

WARNING: From 8.15.0, calling this API in a cluster with no rollup usage will fail with a message about the deprecation and planned removal of rollup features. A cluster needs to contain either a rollup job or a rollup index in order for this API to be allowed to run.

The rollup job configuration contains all the details about how the job should run, when it indexes documents, and what future queries will be able to run against the rollup index.

There are three main sections to the job configuration: the logistical details about the job (for example, the cron schedule), the fields that are used for grouping, and what metrics to collect for each group.

Jobs are created in a STOPPED state. You can start them with the start rollup jobs API.

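A hedged configuration sketch (the index names, cron expression, and field choices are all illustrative):

          client.rollup.put_job(
              id="sensor",
              index_pattern="sensor-*",
              rollup_index="sensor_rollup",
              cron="0 0 0 * * ?",  # run the indexer daily at midnight
              page_size=1000,
              groups={"date_histogram": {"field": "timestamp", "fixed_interval": "1h"}},
              metrics=[{"field": "temperature", "metrics": ["min", "max", "avg"]}],
          )
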
``_ :param id: Identifier for the rollup job. This can be any alphanumeric string and uniquely identifies the data that is associated with the rollup job. The ID is persistent; it is stored with the rolled up data. If you create a job, let it run for a while, then delete the job, the data that the job rolled up is still associated with this job ID. You cannot create a new job with the same ID since that could lead to problems with mismatched job configurations. :param cron: A cron string which defines the intervals when the rollup job should be executed. When the interval triggers, the indexer attempts to rollup the data in the index pattern. The cron pattern is unrelated to the time interval of the data being rolled up. For example, you may wish to create hourly rollups of your documents but to only run the indexer on a daily basis at midnight, as defined by the cron. The cron pattern is defined just like a Watcher cron schedule. :param groups: Defines the grouping fields and aggregations that are defined for this rollup job. These fields will then be available later for aggregating into buckets. These aggs and fields can be used in any combination. Think of the groups configuration as defining a set of tools that can later be used in aggregations to partition the data. Unlike raw data, we have to think ahead to which fields and aggregations might be used. Rollups provide enough flexibility that you simply need to determine which fields are needed, not in what order they are needed. :param index_pattern: The index or index pattern to roll up. Supports wildcard-style patterns (`logstash-*`). The job attempts to rollup the entire index or index-pattern. :param page_size: The number of bucket results that are processed on each iteration of the rollup indexer. A larger value tends to execute faster, but requires more memory during processing. This value has no effect on how the data is rolled up; it is merely used for tweaking the speed or memory cost of the indexer. :param rollup_index: The index that contains the rollup results. The index can be shared with other rollup jobs. The data is stored so that it doesn’t interfere with unrelated jobs. :param headers: :param metrics: Defines the metrics to collect for each grouping tuple. By default, only the doc_counts are collected for each group. To make rollup useful, you will often add metrics like averages, mins, maxes, etc. Metrics are defined on a per-field basis and for each field you configure which metric should be collected. :param timeout: Time to wait for the request to complete.
""" if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if cron is None and body is None: raise ValueError("Empty value passed for parameter 'cron'") if groups is None and body is None: raise ValueError("Empty value passed for parameter 'groups'") if index_pattern is None and body is None: raise ValueError("Empty value passed for parameter 'index_pattern'") if page_size is None and body is None: raise ValueError("Empty value passed for parameter 'page_size'") if rollup_index is None and body is None: raise ValueError("Empty value passed for parameter 'rollup_index'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if cron is not None: __body["cron"] = cron if groups is not None: __body["groups"] = groups if index_pattern is not None: __body["index_pattern"] = index_pattern if page_size is not None: __body["page_size"] = page_size if rollup_index is not None: __body["rollup_index"] = rollup_index if headers is not None: __body["headers"] = headers if metrics is not None: __body["metrics"] = metrics if timeout is not None: __body["timeout"] = timeout __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="rollup.put_job", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("aggregations", "aggs", "query", "size"), ) @_stability_warning(Stability.EXPERIMENTAL) def rollup_search( self, *, index: t.Union[str, t.Sequence[str]], aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, rest_total_hits_as_int: t.Optional[bool] = None, size: t.Optional[int] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Search rolled-up data. The rollup search endpoint is needed because, internally, rolled-up documents utilize a different document structure than the original data. It rewrites standard Query DSL into a format that matches the rollup documents, then takes the response and rewrites it back to what a client would expect given the original query.

The request body supports a subset of features from the regular search API. The following functionality is not available:

  • size: Because rollups work on pre-aggregated data, no search hits can be returned and so size must be set to zero or omitted entirely.
  • highlighter, suggestors, post_filter, profile, explain: These are similarly disallowed.

For more detailed examples of using the rollup search API, including querying rolled-up data only or combining rolled-up and live data, refer to the External documentation.

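A sketch that aggregates rolled-up data without requesting hits (the index and field names are illustrative; size stays at zero as required):

          resp = client.rollup.rollup_search(
              index="sensor_rollup",
              size=0,
              aggs={"max_temperature": {"max": {"field": "temperature"}}},
          )
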
``_ :param index: A comma-separated list of data streams and indices used to limit the request. This parameter has the following rules: * At least one data stream, index, or wildcard expression must be specified. This target can include a rollup or non-rollup index. For data streams, the stream's backing indices can only serve as non-rollup indices. Omitting the parameter or using `_all` are not permitted. * Multiple non-rollup indices may be specified. * Only one rollup index may be specified. If more than one are supplied, an exception occurs. * Wildcard expressions (`*`) may be used. If they match more than one rollup index, an exception occurs. However, you can use an expression to match multiple non-rollup indices or data streams. :param aggregations: Specifies aggregations. :param aggs: Specifies aggregations. :param query: Specifies a DSL query that is subject to some limitations. :param rest_total_hits_as_int: Indicates whether hits.total should be rendered as an integer or an object in the rest search response :param size: Must be zero if set, as rollups work on pre-aggregated data. :param typed_keys: Specify whether aggregation and suggester names should be prefixed by their respective types in the response """ if index in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_rollup_search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if rest_total_hits_as_int is not None: __query["rest_total_hits_as_int"] = rest_total_hits_as_int if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if query is not None: __body["query"] = query if size is not None: __body["size"] = size __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="rollup.rollup_search", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def start_job( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start rollup jobs. If you try to start a job that does not exist, an exception occurs. If you try to start a job that is already started, nothing happens.

``_ :param id: Identifier for the rollup job. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}/_start' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="rollup.start_job", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def stop_job( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop rollup jobs. If you try to stop a job that does not exist, an exception occurs. If you try to stop a job that is already stopped, nothing happens.

Since only a stopped job can be deleted, it can be useful to block the API until the indexer has fully stopped. This is accomplished with the wait_for_completion query parameter, and optionally a timeout. For example:

POST _rollup/job/sensor/_stop?wait_for_completion=true&timeout=10s
          

The parameter blocks the API call from returning until either the job has moved to STOPPED or the specified time has elapsed. If the specified time elapses without the job moving to STOPPED, a timeout exception occurs.

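The equivalent client call (the job ID mirrors the example above):

          client.rollup.stop_job(id="sensor", wait_for_completion=True, timeout="10s")
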
``_ :param id: Identifier for the rollup job. :param timeout: If `wait_for_completion` is `true`, the API blocks for (at maximum) the specified duration while waiting for the job to stop. If more than `timeout` time has passed, the API throws a timeout exception. NOTE: Even if a timeout occurs, the stop request is still processing and eventually moves the job to STOPPED. The timeout simply means the API call itself timed out while waiting for the status change. :param wait_for_completion: If set to `true`, causes the API to block until the indexer state completely stops. If set to `false`, the API returns immediately and the indexer is stopped asynchronously in the background. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_rollup/job/{__path_parts["id"]}/_stop' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="rollup.stop_job", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/search_application.py000066400000000000000000000530131506101734100272600ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SearchApplicationClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.BETA) def delete( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a search application.

Remove a search application and its associated alias. Indices attached to the search application are not removed.

``_ :param name: The name of the search application to delete. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="search_application.delete", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def delete_behavioral_analytics( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a behavioral analytics collection. The associated data stream is also deleted.

``_ :param name: The name of the analytics collection to be deleted """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/analytics/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="search_application.delete_behavioral_analytics", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.BETA) def get( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get search application details.

``_ :param name: The name of the search application """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="search_application.get", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get_behavioral_analytics( self, *, name: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get behavioral analytics collections.

``_ :param name: A list of analytics collections to limit the returned information """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_application/analytics/{__path_parts["name"]}' else: __path_parts = {} __path = "/_application/analytics" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="search_application.get_behavioral_analytics", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) @_stability_warning(Stability.BETA) def list( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, q: t.Optional[str] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get search applications. Get information about existing search applications; use the `q`, `from_`, and `size` parameters to filter and paginate the results.
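A minimal usage sketch, assuming `client` is a configured `Elasticsearch` instance; the query is illustrative::

    resp = client.search_application.list(q="my*", from_=0, size=10)
    # The response carries a count plus a "results" array of application summaries.
    for app in resp["results"]:
        print(app["name"])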

``_ :param from_: Starting offset. :param q: Query in the Lucene query string syntax. :param size: Specifies a max number of results to get. """ __path_parts: t.Dict[str, str] = {} __path = "/_application/search_application" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if q is not None: __query["q"] = q if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="search_application.list", path_parts=__path_parts, ) @_rewrite_parameters( body_name="payload", ) @_stability_warning(Stability.EXPERIMENTAL) def post_behavioral_analytics_event( self, *, collection_name: str, event_type: t.Union[str, t.Literal["page_view", "search", "search_click"]], payload: t.Optional[t.Any] = None, body: t.Optional[t.Any] = None, debug: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a behavioral analytics collection event.
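A minimal usage sketch, assuming `client` is a configured `Elasticsearch` instance. The payload below only hints at the behavioral analytics event schema; consult the events reference for the full required shape::

    client.search_application.post_behavioral_analytics_event(
        collection_name="website-analytics",
        event_type="search_click",
        payload={
            "session": {"id": "1797ca95-91c9-4e2e-b1bd-9c38e6f386a9"},
            "user": {"id": "5f26f01a-bbee-4202-9298-81261067abbd"},
            "search": {"query": "search term"},
        },
    )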

``_ :param collection_name: The name of the behavioral analytics collection. :param event_type: The analytics event type. :param payload: :param debug: Whether the response type has to include more details """ if collection_name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'collection_name'") if event_type in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'event_type'") if payload is None and body is None: raise ValueError( "Empty value passed for parameters 'payload' and 'body', one of them should be set." ) elif payload is not None and body is not None: raise ValueError("Cannot set both 'payload' and 'body'") __path_parts: t.Dict[str, str] = { "collection_name": _quote(collection_name), "event_type": _quote(event_type), } __path = f'/_application/analytics/{__path_parts["collection_name"]}/event/{__path_parts["event_type"]}' __query: t.Dict[str, t.Any] = {} if debug is not None: __query["debug"] = debug if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = payload if payload is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.post_behavioral_analytics_event", path_parts=__path_parts, ) @_rewrite_parameters( body_name="search_application", ) @_stability_warning(Stability.BETA) def put( self, *, name: str, search_application: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, create: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a search application.
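A minimal sketch that stores a search application over one index with a simple search template; the names and template are illustrative::

    client.search_application.put(
        name="my-search-app",
        search_application={
            "indices": ["my-index"],
            "template": {
                "script": {
                    "source": {
                        "query": {"query_string": {"query": "{{query_string}}"}}
                    },
                    "params": {"query_string": "*"},
                }
            },
        },
        create=True,  # fail rather than overwrite an existing application
    )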

``_ :param name: The name of the search application to be created or updated. :param search_application: :param create: If `true`, this request cannot replace or update existing Search Applications. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if search_application is None and body is None: raise ValueError( "Empty value passed for parameters 'search_application' and 'body', one of them should be set." ) elif search_application is not None and body is not None: raise ValueError("Cannot set both 'search_application' and 'body'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if create is not None: __query["create"] = create if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __body = search_application if search_application is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.put", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def put_behavioral_analytics( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a behavioral analytics collection.

``_ :param name: The name of the analytics collection to be created or updated. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/analytics/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="search_application.put_behavioral_analytics", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, ) @_stability_warning(Stability.EXPERIMENTAL) def render_query( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Render a search application query. Generate an Elasticsearch query using the specified query parameters and the search template associated with the search application or a default template if none is specified. If a parameter used in the search template is not specified in params, the parameter's default value will be used. The API returns the specific Elasticsearch query that would be generated and run by calling the search application search API.

You must have read privileges on the backing alias of the search application.
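A minimal usage sketch, assuming `client` is a configured `Elasticsearch` instance and the application's template defines a `query_string` parameter::

    resp = client.search_application.render_query(
        name="my-search-app",
        params={"query_string": "my first query"},
    )
    print(resp)  # the concrete Elasticsearch query the template expands to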

``_ :param name: The name of the search application to render the query for. :param params: """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = ( f'/_application/search_application/{__path_parts["name"]}/_render_query' ) __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if params is not None: __body["params"] = params if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.render_query", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("params",), ignore_deprecated_options={"params"}, ) @_stability_warning(Stability.BETA) def search( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, params: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, typed_keys: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a search application search. Generate and run an Elasticsearch query that uses the specified query parameters and the search template associated with the search application or default template. Unspecified template parameters are assigned their default values if applicable.
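A minimal usage sketch along the same lines; the parameter name is illustrative and must match the application's template::

    resp = client.search_application.search(
        name="my-search-app",
        params={"query_string": "my first query"},
    )
    print(resp["hits"]["hits"])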

``_ :param name: The name of the search application to be searched. :param params: Query parameters specific to this request, which will override any defaults specified in the template. :param typed_keys: Determines whether aggregation names are prefixed by their respective types in the response. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_application/search_application/{__path_parts["name"]}/_search' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if typed_keys is not None: __query["typed_keys"] = typed_keys if not __body: if params is not None: __body["params"] = params if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="search_application.search", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/searchable_snapshots.py000066400000000000000000000305621506101734100276270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SearchableSnapshotsClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def cache_stats( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get cache statistics. Get statistics about the shared cache for partially mounted indices.

``_ :param node_id: The names of the nodes in the cluster to target. :param master_timeout: """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_searchable_snapshots/{__path_parts["node_id"]}/cache/stats' else: __path_parts = {} __path = "/_searchable_snapshots/cache/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="searchable_snapshots.cache_stats", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def clear_cache( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_indices: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the cache. Clear indices and data streams from the shared cache for partially mounted indices.

``_ :param index: A comma-separated list of data streams, indices, and aliases to clear from the cache. It supports wildcards (`*`). :param allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :param expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both. :param ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_searchable_snapshots/cache/clear' else: __path_parts = {} __path = "/_searchable_snapshots/cache/clear" __query: t.Dict[str, t.Any] = {} if allow_no_indices is not None: __query["allow_no_indices"] = allow_no_indices if error_trace is not None: __query["error_trace"] = error_trace if expand_wildcards is not None: __query["expand_wildcards"] = expand_wildcards if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="searchable_snapshots.clear_cache", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "index", "ignore_index_settings", "index_settings", "renamed_index", ), ) def mount( self, *, repository: str, snapshot: str, index: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_index_settings: t.Optional[t.Sequence[str]] = None, index_settings: t.Optional[t.Mapping[str, t.Any]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, renamed_index: t.Optional[str] = None, storage: t.Optional[str] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Mount a snapshot. Mount a snapshot as a searchable snapshot index. Do not use this API for snapshots managed by index lifecycle management (ILM). Manually mounting ILM-managed snapshots can interfere with ILM processes.
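A minimal sketch of mounting an index from a snapshot, assuming `client` is a configured `Elasticsearch` instance; the repository, snapshot, and index names are illustrative::

    client.searchable_snapshots.mount(
        repository="my-repository",
        snapshot="my-snapshot",
        index="my-index",
        renamed_index="my-mounted-index",
        storage="shared_cache",  # partially mounted; "full_copy" is the fully mounted option
        wait_for_completion=True,
    )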

``_ :param repository: The name of the repository containing the snapshot of the index to mount. :param snapshot: The name of the snapshot of the index to mount. :param index: The name of the index contained in the snapshot whose data is to be mounted. If no `renamed_index` is specified, this name will also be used to create the new index. :param ignore_index_settings: The names of settings that should be removed from the index when it is mounted. :param index_settings: The settings that should be added to the index when it is mounted. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param renamed_index: The name of the index that will be created. :param storage: The mount option for the searchable snapshot index. :param wait_for_completion: If true, the request blocks until the operation is complete. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") if index is None and body is None: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = ( f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_mount' ) __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if storage is not None: __query["storage"] = storage if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if index is not None: __body["index"] = index if ignore_index_settings is not None: __body["ignore_index_settings"] = ignore_index_settings if index_settings is not None: __body["index_settings"] = index_settings if renamed_index is not None: __body["renamed_index"] = renamed_index __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="searchable_snapshots.mount", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, index: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, level: t.Optional[ t.Union[str, t.Literal["cluster", "indices", "shards"]] ] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get searchable snapshot statistics.

``_ :param index: A comma-separated list of data streams and indices to retrieve statistics for. :param level: Return stats aggregated at cluster, index or shard level """ __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/{__path_parts["index"]}/_searchable_snapshots/stats' else: __path_parts = {} __path = "/_searchable_snapshots/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if level is not None: __query["level"] = level if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="searchable_snapshots.stats", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/security.py000066400000000000000000006607701506101734100253150ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SecurityClient(NamespacedClient): @_rewrite_parameters( body_fields=("grant_type", "access_token", "password", "username"), ) def activate_user_profile( self, *, grant_type: t.Optional[ t.Union[str, t.Literal["access_token", "password"]] ] = None, access_token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Activate a user profile.

Create or update a user profile on behalf of another user.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. The calling application must have either an access_token or a combination of username and password for the user that the profile document is intended for. Elastic reserves the right to change or remove this feature in future releases without prior notice.

This API creates or updates a profile document for end users with information that is extracted from the user's authentication object including username, full_name, roles, and the authentication realm. For example, in the JWT access_token case, the profile user's username is extracted from the JWT token claim pointed to by the claims.principal setting of the JWT realm that authenticated the token.

When updating a profile document, the API enables the document if it was disabled. Any updates do not change existing content for either the labels or data fields.
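A minimal usage sketch, assuming `client` authenticates with the privileges an application like Kibana would hold; the credentials are illustrative::

    resp = client.security.activate_user_profile(
        grant_type="password",
        username="jacknich",
        password="l0ng-r4nd0m-p@ssw0rd",
    )
    # The response contains the profile document, keyed by its unique "uid".
    print(resp["uid"])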

``_ :param grant_type: The type of grant. :param access_token: The user's Elasticsearch access token or JWT. Both `access` and `id` JWT token types are supported and they depend on the underlying JWT realm configuration. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. :param username: The username that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. """ if grant_type is None and body is None: raise ValueError("Empty value passed for parameter 'grant_type'") __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_activate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if grant_type is not None: __body["grant_type"] = grant_type if access_token is not None: __body["access_token"] = access_token if password is not None: __body["password"] = password if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.activate_user_profile", path_parts=__path_parts, ) @_rewrite_parameters() def authenticate( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Authenticate a user.

Authenticates a user and returns information about the authenticated user. Include the user information in a basic auth header. A successful call returns a JSON structure that shows user information such as their username, the roles that are assigned to the user, any assigned metadata, and information about the realms that authenticated and authorized the user. If the user cannot be authenticated, this API returns a 401 status code.
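A minimal usage sketch; the endpoint and credentials are illustrative::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200", basic_auth=("elastic", "changeme"))
    me = client.security.authenticate()
    print(me["username"], me["roles"])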

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_authenticate" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.authenticate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("names",), ) def bulk_delete_role( self, *, names: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk delete roles.

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk delete roles API cannot delete roles that are defined in roles files.
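A minimal usage sketch, assuming `client` is a configured `Elasticsearch` instance; the role names are illustrative::

    resp = client.security.bulk_delete_role(names=["my_admin_role", "my_user_role"])
    # The response separates deleted, not-found, and errored role names.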

``_ :param names: An array of role names to delete :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if names is None and body is None: raise ValueError("Empty value passed for parameter 'names'") __path_parts: t.Dict[str, str] = {} __path = "/_security/role" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if names is not None: __body["names"] = names __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.bulk_delete_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("roles",), ) def bulk_put_role( self, *, roles: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk create or update roles.

The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The bulk create or update roles API cannot update roles that are defined in roles files.
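A minimal sketch that upserts a single role; the descriptor is illustrative::

    client.security.bulk_put_role(
        roles={
            "my_admin_role": {
                "cluster": ["all"],
                "indices": [{"names": ["index1", "index2"], "privileges": ["all"]}],
            }
        }
    )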

``_ :param roles: A dictionary of role name to RoleDescriptor objects to add or update :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if roles is None and body is None: raise ValueError("Empty value passed for parameter 'roles'") __path_parts: t.Dict[str, str] = {} __path = "/_security/role" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if roles is not None: __body["roles"] = roles __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.bulk_put_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("ids", "expiration", "metadata", "role_descriptors"), ) def bulk_update_api_keys( self, *, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Bulk update API keys. Update the attributes for multiple API keys.

IMPORTANT: It is not possible to use an API key as the authentication credential for this API. To update API keys, the owner user's credentials are required.

This API is similar to the update API key API but enables you to apply the same update to multiple API keys in one API call. This operation can greatly improve performance over making individual updates.

It is not possible to update expired or invalidated API keys.

This API supports updates to API key access scope, metadata and expiration. The access scope of each API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call.

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change an API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.

A successful request returns a JSON structure that contains the IDs of all updated API keys, the IDs of API keys that already had the requested changes and did not require an update, and error details for any failed update.
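A minimal usage sketch; the key IDs are illustrative. Passing an empty `role_descriptors` object removes assigned privileges so the keys inherit the owner's permissions::

    client.security.bulk_update_api_keys(
        ids=["VuaCfGcBCdbkQm-e5aOx", "H3_AhoIBA9hmeQJdg7ij"],
        role_descriptors={},
        metadata={"environment": "production"},
    )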

``_ :param ids: The API key identifiers. :param expiration: Expiration time for the API keys. By default, API keys never expire. This property can be omitted to leave the value unchanged. :param metadata: Arbitrary nested metadata to associate with the API keys. Within the `metadata` object, top-level keys beginning with an underscore (`_`) are reserved for system usage. Any information specified with this parameter fully replaces metadata previously associated with the API key. :param role_descriptors: The role descriptors to assign to the API keys. An API key's effective permissions are an intersection of its assigned privileges and the point-in-time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, supply the `role_descriptors` parameter as an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter. The structure of a role descriptor is the same as the request for the create API keys API. """ if ids is None and body is None: raise ValueError("Empty value passed for parameter 'ids'") __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key/_bulk_update" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if ids is not None: __body["ids"] = ids if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata if role_descriptors is not None: __body["role_descriptors"] = role_descriptors __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.bulk_update_api_keys", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("password", "password_hash"), ) def change_password( self, *, username: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, password: t.Optional[str] = None, password_hash: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Change passwords.

Change the passwords of users in the native realm and built-in users.
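A minimal usage sketch, assuming `client` is a configured `Elasticsearch` instance; the username and password are illustrative::

    client.security.change_password(
        username="jacknich", password="new-l0ng-r4nd0m-p@ssw0rd"
    )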

``_ :param username: The user whose password you want to change. If you do not specify this parameter, the password is changed for the current user. :param password: The new password value. Passwords must be at least 6 characters long. :param password_hash: A hash of the new password value. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ __path_parts: t.Dict[str, str] if username not in SKIP_IN_PATH: __path_parts = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}/_password' else: __path_parts = {} __path = "/_security/user/_password" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if password is not None: __body["password"] = password if password_hash is not None: __body["password_hash"] = password_hash __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.change_password", path_parts=__path_parts, ) @_rewrite_parameters() def clear_api_key_cache( self, *, ids: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the API key cache.

Evict a subset of all entries from the API key cache. The cache is also automatically cleared on state changes of the security index.

``_ :param ids: Comma-separated list of API key IDs to evict from the API key cache. To evict all API keys, use `*`. Does not support other wildcard patterns. """ if ids in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'ids'") __path_parts: t.Dict[str, str] = {"ids": _quote(ids)} __path = f'/_security/api_key/{__path_parts["ids"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_api_key_cache", path_parts=__path_parts, ) @_rewrite_parameters() def clear_cached_privileges( self, *, application: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the privileges cache.

Evict privileges from the native application privilege cache. The cache is also automatically cleared for applications that have their privileges updated.

``_ :param application: A comma-separated list of applications. To clear all applications, use an asterisk (`*`). It does not support other wildcard patterns. """ if application in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'application'") __path_parts: t.Dict[str, str] = {"application": _quote(application)} __path = f'/_security/privilege/{__path_parts["application"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_privileges", path_parts=__path_parts, ) @_rewrite_parameters() def clear_cached_realms( self, *, realms: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, usernames: t.Optional[t.Sequence[str]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the user cache.

Evict users from the user cache. You can completely clear the cache or evict specific users.

User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. There are realm settings that you can use to configure the user cache. For more information, refer to the documentation about controlling the user cache.
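A minimal usage sketch; the realm and usernames are illustrative. Omitting `usernames` evicts every user of the realm::

    client.security.clear_cached_realms(
        realms="default_file", usernames=["rdeniro", "alpacino"]
    )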

``_ :param realms: A comma-separated list of realms. To clear all realms, use an asterisk (`*`). It does not support other wildcard patterns. :param usernames: A comma-separated list of the users to clear from the cache. If you do not specify this parameter, the API evicts all users from the user cache. """ if realms in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'realms'") __path_parts: t.Dict[str, str] = {"realms": _quote(realms)} __path = f'/_security/realm/{__path_parts["realms"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if usernames is not None: __query["usernames"] = usernames __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_realms", path_parts=__path_parts, ) @_rewrite_parameters() def clear_cached_roles( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear the roles cache.

Evict roles from the native role cache.

``_ :param name: A comma-separated list of roles to evict from the role cache. To evict all roles, use an asterisk (`*`). It does not support other wildcard patterns. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_roles", path_parts=__path_parts, ) @_rewrite_parameters() def clear_cached_service_tokens( self, *, namespace: str, service: str, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear service account token caches.

Evict a subset of all entries from the service account token caches. Two separate caches exist for service account tokens: one cache for tokens backed by the service_tokens file, and another for tokens backed by the .security index. This API clears matching entries from both caches.

The cache for service account tokens backed by the .security index is cleared automatically on state changes of the security index. The cache for tokens backed by the service_tokens file is cleared automatically on file changes.
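A minimal usage sketch; the service account coordinates are illustrative. A wildcard (`*`) for `name` evicts every token of the account::

    client.security.clear_cached_service_tokens(
        namespace="elastic", service="fleet-server", name="token1"
    )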

``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The name of the service, which must be unique within its namespace. :param name: A comma-separated list of token names to evict from the service account token caches. Use a wildcard (`*`) to evict all tokens that belong to a service account. It does not support other wildcard patterns. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = { "namespace": _quote(namespace), "service": _quote(service), "name": _quote(name), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token/{__path_parts["name"]}/_clear_cache' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="security.clear_cached_service_tokens", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("expiration", "metadata", "name", "role_descriptors"), ) def create_api_key( self, *, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create an API key.

Create an API key for access without requiring basic authentication.

IMPORTANT: If the credential that is used to authenticate this request is an API key, the derived API key cannot have any privileges. If you specify privileges, the API returns an error.

A successful request returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

NOTE: By default, API keys never expire. You can specify expiration information when you create the API keys.

The API keys are created by the Elasticsearch API key service, which is automatically enabled. To configure or turn off the API key service, refer to API key service setting documentation.
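A minimal sketch that creates a short-lived key restricted by a role descriptor; all names are illustrative::

    resp = client.security.create_api_key(
        name="my-api-key",
        expiration="1d",
        role_descriptors={
            "role-a": {"indices": [{"names": ["index-a*"], "privileges": ["read"]}]}
        },
    )
    # "encoded" is ready for use in an "Authorization: ApiKey ..." header.
    print(resp["id"], resp["encoded"])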

``_ :param expiration: The expiration time for the API key. By default, API keys never expire. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. :param name: A name for the API key. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param role_descriptors: An array of role descriptors for this API key. When it is not specified or it is an empty array, the API key will have a point in time snapshot of permissions of the authenticated user. If you supply role descriptors, the resultant permissions are an intersection of API keys permissions and the authenticated user's permissions thereby limiting the access scope for API keys. The structure of role descriptor is the same as the request for the create role API. For more details, refer to the create or update roles API. NOTE: Due to the way in which this permission intersection is calculated, it is not possible to create an API key that is a child of another API key, unless the derived key is created without any privileges. In this case, you must explicitly specify a role descriptor with no privileges. The derived API key can be used for authentication; it will not have authority to call Elasticsearch APIs. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata if name is not None: __body["name"] = name if role_descriptors is not None: __body["role_descriptors"] = role_descriptors __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.create_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("access", "name", "expiration", "metadata"), ) def create_cross_cluster_api_key( self, *, access: t.Optional[t.Mapping[str, t.Any]] = None, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a cross-cluster API key.

Create an API key of the cross_cluster type for the API key based remote cluster access. A cross_cluster API key cannot be used to authenticate through the REST interface.

IMPORTANT: To authenticate this request you must use a credential that is not an API key. Even if you use an API key that has the required privilege, the API returns an error.

Cross-cluster API keys are created by the Elasticsearch API key service, which is automatically enabled.

NOTE: Unlike REST API keys, a cross-cluster API key does not capture permissions of the authenticated user. The API key’s effective permission is exactly as specified with the access property.

A successful request returns a JSON structure that contains the API key, its unique ID, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.

Cross-cluster API keys can only be updated with the update cross-cluster API key API. Attempting to update them with the update REST API key API or the bulk update REST API keys API will result in an error.
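A minimal sketch granting cross-cluster search access on one index pattern; the names are illustrative::

    client.security.create_cross_cluster_api_key(
        name="my-cross-cluster-key",
        access={"search": [{"names": ["logs*"]}]},
    )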

``_ :param access: The access to be granted to this API key. The access is composed of permissions for cross-cluster search and cross-cluster replication. At least one of them must be specified. NOTE: No explicit privileges should be specified for either search or replication access. The creation process automatically converts the access specification to a role descriptor which has relevant privileges assigned accordingly. :param name: Specifies the name for this API key. :param expiration: Expiration time for the API key. By default, API keys never expire. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. """ if access is None and body is None: raise ValueError("Empty value passed for parameter 'access'") if name is None and body is None: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {} __path = "/_security/cross_cluster/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if access is not None: __body["access"] = access if name is not None: __body["name"] = name if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.create_cross_cluster_api_key", path_parts=__path_parts, ) @_rewrite_parameters() def create_service_token( self, *, namespace: str, service: str, name: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a service account token.

Create a service account token for access without requiring basic authentication.

NOTE: Service account tokens never expire. You must actively delete them if they are no longer needed.
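A minimal usage sketch; the service account coordinates are illustrative. With an explicit `name` the client issues a PUT; without one it issues a POST and Elasticsearch generates a token name::

    resp = client.security.create_service_token(
        namespace="elastic", service="fleet-server", name="token1"
    )
    print(resp["token"]["value"])  # the bearer token secret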

``_ :param namespace: The name of the namespace, which is a top-level grouping of service accounts. :param service: The name of the service. :param name: The name for the service account token. If omitted, a random name will be generated. Token names must be at least one and no more than 256 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), dashes (`-`), and underscores (`_`), but cannot begin with an underscore. NOTE: Token names must be unique in the context of the associated service account. They must also be globally unique with their fully qualified names, which are comprised of the service account principal and token name, such as `<namespace>/<service>/<token-name>`. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") __path_parts: t.Dict[str, str] if ( namespace not in SKIP_IN_PATH and service not in SKIP_IN_PATH and name not in SKIP_IN_PATH ): __path_parts = { "namespace": _quote(namespace), "service": _quote(service), "name": _quote(name), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token/{__path_parts["name"]}' __method = "PUT" elif namespace not in SKIP_IN_PATH and service not in SKIP_IN_PATH: __path_parts = {"namespace": _quote(namespace), "service": _quote(service)} __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token' __method = "POST" else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] __method, __path, params=__query, headers=__headers, endpoint_id="security.create_service_token", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("x509_certificate_chain",), ) def delegate_pki( self, *, x509_certificate_chain: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delegate PKI authentication.

This API implements the exchange of an X509Certificate chain for an Elasticsearch access token. The certificate chain is validated, according to RFC 5280, by sequentially considering the trust configuration of every installed PKI realm that has delegation.enabled set to true. A successfully trusted client certificate is also subject to the validation of the subject distinguished name according to the username_pattern of the respective realm.

This API is called by smart and trusted proxies, such as Kibana, which terminate the user's TLS session but still want to authenticate the user by using a PKI realm, as if the user connected directly to Elasticsearch.

IMPORTANT: The association between the subject public key in the target certificate and the corresponding private key is not validated. This is part of the TLS authentication process and it is delegated to the proxy that calls this API. The proxy is trusted to have performed the TLS authentication and this API translates that authentication into an Elasticsearch access token.
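
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the certificate string is a placeholder for a real base64-encoded DER certificate:

    resp = client.security.delegate_pki(
        # Target certificate first, followed by any certificates that certify it.
        x509_certificate_chain=["MIIDeDCC...placeholder..."],
    )
    print(resp["access_token"])  # the resulting Elasticsearch access token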

``_ :param x509_certificate_chain: The X509Certificate chain, which is represented as an ordered string array. Each string in the array is a base64-encoded (Section 4 of RFC4648 - not base64url-encoded) of the certificate's DER encoding. The first element is the target certificate that contains the subject distinguished name that is requesting access. This may be followed by additional certificates; each subsequent certificate is used to certify the previous one. """ if x509_certificate_chain is None and body is None: raise ValueError( "Empty value passed for parameter 'x509_certificate_chain'" ) __path_parts: t.Dict[str, str] = {} __path = "/_security/delegate_pki" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if x509_certificate_chain is not None: __body["x509_certificate_chain"] = x509_certificate_chain __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.delegate_pki", path_parts=__path_parts, ) @_rewrite_parameters() def delete_privileges( self, *, application: str, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete application privileges.

To use this API, you must have one of the following privileges:

  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
``_ :param application: The name of the application. Application privileges are always associated with exactly one application. :param name: The name of the privilege. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if application in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'application'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = { "application": _quote(application), "name": _quote(name), } __path = ( f'/_security/privilege/{__path_parts["application"]}/{__path_parts["name"]}' ) __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_privileges", path_parts=__path_parts, ) @_rewrite_parameters() def delete_role( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete roles.

Delete roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The delete roles API cannot remove roles that are defined in roles files.
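
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the role name is hypothetical):

    resp = client.security.delete_role(name="my_admin_role")
    print(resp["found"])  # False if no such native role existed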

``_ :param name: The name of the role. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_role", path_parts=__path_parts, ) @_rewrite_parameters() def delete_role_mapping( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete role mappings.

Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The delete role mappings API cannot remove role mappings that are defined in role mapping files.
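
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the mapping name is hypothetical):

    resp = client.security.delete_role_mapping(name="mapping1")
    print(resp["found"])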

``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role_mapping/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_role_mapping", path_parts=__path_parts, ) @_rewrite_parameters() def delete_service_token( self, *, namespace: str, service: str, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete service account tokens.

Delete service account tokens for a service in a specified namespace.
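
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the token name is hypothetical):

    resp = client.security.delete_service_token(
        namespace="elastic",
        service="fleet-server",
        name="my-token",
    )
    print(resp["found"])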

``_ :param namespace: The namespace, which is a top-level grouping of service accounts. :param service: The service name. :param name: The name of the service account token. :param refresh: If `true` then refresh the affected shards to make this operation visible to search, if `wait_for` (the default) then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = { "namespace": _quote(namespace), "service": _quote(service), "name": _quote(name), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential/token/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_service_token", path_parts=__path_parts, ) @_rewrite_parameters() def delete_user( self, *, username: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete users.

Delete users from the native realm.
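
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the username is hypothetical):

    resp = client.security.delete_user(username="jacknich")
    print(resp["found"])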

``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="security.delete_user", path_parts=__path_parts, ) @_rewrite_parameters() def disable_user( self, *, username: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Disable users.

Disable users in the native realm. By default, when you create users, they are enabled. You can use this API to revoke a user's access to Elasticsearch.
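
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the username is hypothetical):

    client.security.disable_user(username="jacknich")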

``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}/_disable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.disable_user", path_parts=__path_parts, ) @_rewrite_parameters() def disable_user_profile( self, *, uid: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Disable a user profile.

Disable user profiles so that they are not visible in user profile searches.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

When you activate a user profile, it's automatically enabled and visible in user profile searches. You can use the disable user profile API to disable a user profile so it's not visible in these searches. To re-enable a disabled user profile, use the enable user profile API.
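
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the UID is a placeholder for a value returned by the activate user profile API:

    client.security.disable_user_profile(
        uid="u_...placeholder...",
        refresh="wait_for",  # make the change visible to searches before returning
    )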

``_ :param uid: Unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', it does nothing with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}/_disable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.disable_user_profile", path_parts=__path_parts, ) @_rewrite_parameters() def enable_user( self, *, username: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enable users.

Enable users in the native realm. By default, when you create users, they are enabled.
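
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the username is hypothetical):

    client.security.enable_user(username="jacknich")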

``_ :param username: An identifier for the user. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}/_enable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.enable_user", path_parts=__path_parts, ) @_rewrite_parameters() def enable_user_profile( self, *, uid: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enable a user profile.

Enable user profiles to make them visible in user profile searches.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

When you activate a user profile, it's automatically enabled and visible in user profile searches. If you later disable the user profile, you can use the enable user profile API to make the profile visible in these searches again.
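
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the UID is a placeholder:

    client.security.enable_user_profile(uid="u_...placeholder...")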

``_ :param uid: A unique identifier for the user profile. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}/_enable' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="security.enable_user_profile", path_parts=__path_parts, ) @_rewrite_parameters() def enroll_kibana( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enroll Kibana.

Enable a Kibana instance to configure itself for communication with a secured Elasticsearch cluster.

NOTE: This API is currently intended for internal use only by Kibana. Kibana uses this API internally to configure itself for communications with an Elasticsearch cluster that already has security features enabled.
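
A minimal sketch, assuming a configured `Elasticsearch` client named `client`:

    resp = client.security.enroll_kibana()
    print(resp)  # includes an enrollment token and the HTTP CA certificate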

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/kibana" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.enroll_kibana", path_parts=__path_parts, ) @_rewrite_parameters() def enroll_node( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Enroll a node.

Enroll a new node to allow it to join an existing cluster with security features enabled.

The response contains all the necessary information for the joining node to bootstrap discovery and security-related settings so that it can successfully join the cluster. It also contains key and certificate material that allows the caller to generate valid signed certificates for the HTTP layer of all nodes in the cluster.
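
A minimal sketch, assuming a configured `Elasticsearch` client named `client` connected to an existing node of the cluster:

    resp = client.security.enroll_node()
    print(list(resp))  # the key and certificate material described above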

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/enroll/node" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.enroll_node", path_parts=__path_parts, ) @_rewrite_parameters() def get_api_key( self, *, active_only: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, id: t.Optional[str] = None, name: t.Optional[str] = None, owner: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm_name: t.Optional[str] = None, username: t.Optional[str] = None, with_limited_by: t.Optional[bool] = None, with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get API key information.

Retrieves information for one or more API keys. NOTE: If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. If you have read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership.
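
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the key name is hypothetical):

    resp = client.security.get_api_key(name="my-api-key", active_only=True)
    for key in resp["api_keys"]:
        print(key["id"], key["name"])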

``_ :param active_only: A boolean flag that can be used to query API keys that are currently active. An API key is considered active if it is neither invalidated, nor expired at query time. You can specify this together with other parameters such as `owner` or `name`. If `active_only` is false, the response will include both active and inactive (expired or invalidated) keys. :param id: An API key id. This parameter cannot be used with any of `name`, `realm_name` or `username`. :param name: An API key name. This parameter cannot be used with any of `id`, `realm_name` or `username`. It supports prefix search with wildcard. :param owner: A boolean flag that can be used to query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. :param realm_name: The name of an authentication realm. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. :param username: The username of a user. This parameter cannot be used with either `id` or `name` or when `owner` flag is set to `true`. :param with_limited_by: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors. :param with_profile_uid: Determines whether to also retrieve the profile uid, for the API key owner principal, if it exists. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" __query: t.Dict[str, t.Any] = {} if active_only is not None: __query["active_only"] = active_only if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if id is not None: __query["id"] = id if name is not None: __query["name"] = name if owner is not None: __query["owner"] = owner if pretty is not None: __query["pretty"] = pretty if realm_name is not None: __query["realm_name"] = realm_name if username is not None: __query["username"] = username if with_limited_by is not None: __query["with_limited_by"] = with_limited_by if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_api_key", path_parts=__path_parts, ) @_rewrite_parameters() def get_builtin_privileges( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get builtin privileges.

Get the list of cluster privileges and index privileges that are available in this version of Elasticsearch.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege/_builtin" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_builtin_privileges", path_parts=__path_parts, ) @_rewrite_parameters() def get_privileges( self, *, application: t.Optional[str] = None, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get application privileges.

To use this API, you must have one of the following privileges:

  • The read_security cluster privilege (or a greater privilege such as manage_security or all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.
``_ :param application: The name of the application. Application privileges are always associated with exactly one application. If you do not specify this parameter, the API returns information about all privileges for all applications. :param name: The name of the privilege. If you do not specify this parameter, the API returns information about all privileges for the requested application. """ __path_parts: t.Dict[str, str] if application not in SKIP_IN_PATH and name not in SKIP_IN_PATH: __path_parts = {"application": _quote(application), "name": _quote(name)} __path = f'/_security/privilege/{__path_parts["application"]}/{__path_parts["name"]}' elif application not in SKIP_IN_PATH: __path_parts = {"application": _quote(application)} __path = f'/_security/privilege/{__path_parts["application"]}' else: __path_parts = {} __path = "/_security/privilege" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_privileges", path_parts=__path_parts, ) @_rewrite_parameters() def get_role( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get roles.

Get roles in the native realm. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The get roles API cannot retrieve roles that are defined in roles files.
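
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the response maps role names to their definitions (the role name is hypothetical):

    resp = client.security.get_role(name="my_admin_role")  # omit `name` to list all roles
    print(resp["my_admin_role"]["cluster"])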

``_ :param name: The name of the role. You can specify multiple roles as a comma-separated list. If you do not specify this parameter, the API returns information about all roles. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}' else: __path_parts = {} __path = "/_security/role" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_role", path_parts=__path_parts, ) @_rewrite_parameters() def get_role_mapping( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get role mappings.

Role mappings define which roles are assigned to each user. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The get role mappings API cannot retrieve role mappings that are defined in role mapping files.

``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. You can specify multiple mapping names as a comma-separated list. If you do not specify this parameter, the API returns information about all role mappings. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"name": _quote(name)} __path = f'/_security/role_mapping/{__path_parts["name"]}' else: __path_parts = {} __path = "/_security/role_mapping" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_role_mapping", path_parts=__path_parts, ) @_rewrite_parameters() def get_service_accounts( self, *, namespace: t.Optional[str] = None, service: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get service accounts.

Get a list of service accounts that match the provided path parameters.

NOTE: Currently, only the elastic/fleet-server service account is available.
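
A minimal sketch, assuming a configured `Elasticsearch` client named `client`:

    resp = client.security.get_service_accounts(
        namespace="elastic", service="fleet-server"
    )  # omit both parameters to list every service account
    print(list(resp))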

``_ :param namespace: The name of the namespace. Omit this parameter to retrieve information about all service accounts. If you omit this parameter, you must also omit the `service` parameter. :param service: The service name. Omit this parameter to retrieve information about all service accounts that belong to the specified `namespace`. """ __path_parts: t.Dict[str, str] if namespace not in SKIP_IN_PATH and service not in SKIP_IN_PATH: __path_parts = {"namespace": _quote(namespace), "service": _quote(service)} __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}' elif namespace not in SKIP_IN_PATH: __path_parts = {"namespace": _quote(namespace)} __path = f'/_security/service/{__path_parts["namespace"]}' else: __path_parts = {} __path = "/_security/service" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_service_accounts", path_parts=__path_parts, ) @_rewrite_parameters() def get_service_credentials( self, *, namespace: str, service: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get service account credentials.

To use this API, you must have at least the read_security cluster privilege (or a greater privilege such as manage_service_account or manage_security).

The response includes service account tokens that were created with the create service account tokens API as well as file-backed tokens from all nodes of the cluster.

NOTE: For tokens backed by the service_tokens file, the API collects them from all nodes of the cluster. Tokens with the same name from different nodes are assumed to be the same token and are only counted once towards the total number of service tokens.
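
A minimal sketch, assuming a configured `Elasticsearch` client named `client`:

    resp = client.security.get_service_credentials(
        namespace="elastic", service="fleet-server"
    )
    print(resp["tokens"])  # index-backed tokens; file-backed ones are reported too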

``_ :param namespace: The name of the namespace. :param service: The service name. """ if namespace in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'namespace'") if service in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'service'") __path_parts: t.Dict[str, str] = { "namespace": _quote(namespace), "service": _quote(service), } __path = f'/_security/service/{__path_parts["namespace"]}/{__path_parts["service"]}/credential' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_service_credentials", path_parts=__path_parts, ) @_rewrite_parameters() def get_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get security index settings.

Get the user-configurable settings for the security internal index (.security and associated indices). Only a subset of the index settings (those that are user-configurable) will be shown. This includes:

  • index.auto_expand_replicas
  • index.number_of_replicas
``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/settings" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "grant_type", "kerberos_ticket", "password", "refresh_token", "scope", "username", ), ) def get_token( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, grant_type: t.Optional[ t.Union[ str, t.Literal[ "_kerberos", "client_credentials", "password", "refresh_token" ], ] ] = None, human: t.Optional[bool] = None, kerberos_ticket: t.Optional[str] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh_token: t.Optional[str] = None, scope: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a token.

Create a bearer token for access without requiring basic authentication. The tokens are created by the Elasticsearch Token Service, which is automatically enabled when you configure TLS on the HTTP interface. Alternatively, you can explicitly enable the xpack.security.authc.token.enabled setting. When you are running in production mode, a bootstrap check prevents you from enabling the token service unless you also enable TLS on the HTTP interface.

The get token API takes the same parameters as a typical OAuth 2.0 token API except for the use of a JSON request body.

A successful get token API call returns a JSON structure that contains the access token, the amount of time in seconds until the token expires, the type, and the scope if available.

The tokens returned by the get token API have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the xpack.security.authc.token.timeout setting. If you want to invalidate a token immediately, you can do so by using the invalidate token API.
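
A minimal sketch, assuming a configured `Elasticsearch` client named `client` and an existing native user (the credentials are illustrative):

    resp = client.security.get_token(
        grant_type="password",
        username="test_user",
        password="<password>",
    )
    access_token = resp["access_token"]  # send as a Bearer credential
    refresh_token = resp["refresh_token"]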

``_ :param grant_type: The type of grant. Supported grant types are: `password`, `_kerberos`, `client_credentials`, and `refresh_token`. :param kerberos_ticket: The base64 encoded kerberos ticket. If you specify the `_kerberos` grant type, this parameter is required. This parameter is not valid with any other supported grant type. :param password: The user's password. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. :param refresh_token: The string that was returned when you created the token, which enables you to extend its life. If you specify the `refresh_token` grant type, this parameter is required. This parameter is not valid with any other supported grant type. :param scope: The scope of the token. Currently tokens are only issued for a scope of FULL regardless of the value sent with the request. :param username: The username that identifies the user. If you specify the `password` grant type, this parameter is required. This parameter is not valid with any other supported grant type. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if grant_type is not None: __body["grant_type"] = grant_type if kerberos_ticket is not None: __body["kerberos_ticket"] = kerberos_ticket if password is not None: __body["password"] = password if refresh_token is not None: __body["refresh_token"] = refresh_token if scope is not None: __body["scope"] = scope if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.get_token", path_parts=__path_parts, ) @_rewrite_parameters() def get_user( self, *, username: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, with_profile_uid: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get users.

Get information about users in the native realm and built-in users.
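
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the response maps usernames to user records (the username is hypothetical):

    resp = client.security.get_user(username="jacknich")  # omit `username` to list all users
    print(resp["jacknich"]["roles"])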

``_ :param username: An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. :param with_profile_uid: Determines whether to retrieve the user profile UID, if it exists, for the users. """ __path_parts: t.Dict[str, str] if username not in SKIP_IN_PATH: __path_parts = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}' else: __path_parts = {} __path = "/_security/user" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_user", path_parts=__path_parts, ) @_rewrite_parameters() def get_user_privileges( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get user privileges.

Get the security privileges for the logged-in user. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature. To check whether a user has a specific list of privileges, use the has privileges API.

``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_security/user/_privileges" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_user_privileges", path_parts=__path_parts, ) @_rewrite_parameters() def get_user_profile( self, *, uid: t.Union[str, t.Sequence[str]], data: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a user profile.

Get a user's profile using the unique profile ID.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
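
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the UID is a placeholder and `app1.key1` an illustrative data key:

    resp = client.security.get_user_profile(
        uid="u_...placeholder...",
        data="app1.key1",  # return only this slice of the profile's `data` field
    )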

``_ :param uid: A unique identifier for the user profile. :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content use `data=` to retrieve content nested under the specified ``. By default returns no `data` content. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}' __query: t.Dict[str, t.Any] = {} if data is not None: __query["data"] = data if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.get_user_profile", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "api_key", "grant_type", "access_token", "password", "run_as", "username", ), ignore_deprecated_options={"api_key"}, ) def grant_api_key( self, *, api_key: t.Optional[t.Mapping[str, t.Any]] = None, grant_type: t.Optional[ t.Union[str, t.Literal["access_token", "password"]] ] = None, access_token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, password: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, run_as: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Grant an API key.

Create an API key on behalf of another user. This API is similar to the create API keys API; however, it creates the API key for a user that is different from the user that runs the API. The caller must have authentication credentials for the user on whose behalf the API key will be created. It is not possible to use this API to create an API key without that user's credentials. The supported user authentication credential types are:

  • username and password
  • Elasticsearch access tokens
  • JWTs

The user for whom the authentication credentials are provided can optionally "run as" (impersonate) another user. In this case, the API key will be created on behalf of the impersonated user.

This API is intended to be used by applications that need to create and manage API keys for end users, but cannot guarantee that those users have permission to create API keys on their own behalf. The API keys are created by the Elasticsearch API key service, which is automatically enabled.

A successful grant API key API call returns a JSON structure that contains the API key, its unique id, and its name. If applicable, it also returns expiration information for the API key in milliseconds.

By default, API keys never expire. You can specify expiration information when you create the API keys.
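
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the end-user credentials and key name are illustrative:

    resp = client.security.grant_api_key(
        grant_type="password",
        username="end_user",
        password="<password>",
        api_key={"name": "my-granted-key"},
    )
    print(resp["id"], resp["api_key"])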

``_ :param api_key: The API key. :param grant_type: The type of grant. Supported grant types are: `access_token`, `password`. :param access_token: The user's access token. If you specify the `access_token` grant type, this parameter is required. It is not valid with other grant types. :param password: The user's password. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. :param run_as: The name of the user to be impersonated. :param username: The user name that identifies the user. If you specify the `password` grant type, this parameter is required. It is not valid with other grant types. """ if api_key is None and body is None: raise ValueError("Empty value passed for parameter 'api_key'") if grant_type is None and body is None: raise ValueError("Empty value passed for parameter 'grant_type'") __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key/grant" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if api_key is not None: __body["api_key"] = api_key if grant_type is not None: __body["grant_type"] = grant_type if access_token is not None: __body["access_token"] = access_token if password is not None: __body["password"] = password if run_as is not None: __body["run_as"] = run_as if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.grant_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("application", "cluster", "index"), ) def has_privileges( self, *, user: t.Optional[str] = None, application: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, cluster: t.Optional[ t.Sequence[ t.Union[ str, t.Literal[ "all", "cancel_task", "create_snapshot", "cross_cluster_replication", "cross_cluster_search", "delegate_pki", "grant_api_key", "manage", "manage_api_key", "manage_autoscaling", "manage_behavioral_analytics", "manage_ccr", "manage_data_frame_transforms", "manage_data_stream_global_retention", "manage_enrich", "manage_esql", "manage_ilm", "manage_index_templates", "manage_inference", "manage_ingest_pipelines", "manage_logstash_pipelines", "manage_ml", "manage_oidc", "manage_own_api_key", "manage_pipeline", "manage_rollup", "manage_saml", "manage_search_application", "manage_search_query_rules", "manage_search_synonyms", "manage_security", "manage_service_account", "manage_slm", "manage_token", "manage_transform", "manage_user_profile", "manage_watcher", "monitor", "monitor_data_frame_transforms", "monitor_data_stream_global_retention", "monitor_enrich", "monitor_esql", "monitor_inference", "monitor_ml", "monitor_rollup", "monitor_snapshot", "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", "none", "post_behavioral_analytics_event", "read_ccr", "read_fleet_secrets", "read_ilm", 
"read_pipeline", "read_security", "read_slm", "transport_client", "write_connector_secrets", "write_fleet_secrets", ], ] ] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check user privileges.

Determine whether the specified user has a specified list of privileges. All users can use this API, but only to determine their own privileges. To check the privileges of other users, you must use the run as feature.
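
A minimal sketch, assuming a configured `Elasticsearch` client named `client` (the index name is hypothetical):

    resp = client.security.has_privileges(
        cluster=["monitor"],
        index=[{"names": ["my-index"], "privileges": ["read", "write"]}],
    )
    print(resp["has_all_requested"])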

``_ :param user: Username :param application: :param cluster: A list of the cluster privileges that you want to check. :param index: """ __path_parts: t.Dict[str, str] if user not in SKIP_IN_PATH: __path_parts = {"user": _quote(user)} __path = f'/_security/user/{__path_parts["user"]}/_has_privileges' else: __path_parts = {} __path = "/_security/user/_has_privileges" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if application is not None: __body["application"] = application if cluster is not None: __body["cluster"] = cluster if index is not None: __body["index"] = index __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.has_privileges", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("privileges", "uids"), ) def has_privileges_user_profile( self, *, privileges: t.Optional[t.Mapping[str, t.Any]] = None, uids: t.Optional[t.Sequence[str]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Check user profile privileges.

Determine whether the users associated with the specified user profile IDs have all the requested privileges.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
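
A minimal sketch, assuming a configured `Elasticsearch` client named `client`; the UID is a placeholder and the index name hypothetical:

    resp = client.security.has_privileges_user_profile(
        uids=["u_...placeholder..."],
        privileges={
            "cluster": ["monitor"],
            "index": [{"names": ["my-index"], "privileges": ["read"]}],
        },
    )
    print(resp["has_privilege_uids"])  # profiles whose users hold all requested privileges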

``_ :param privileges: An object containing all the privileges to be checked. :param uids: A list of profile IDs. The privileges are checked for associated users of the profiles. """ if privileges is None and body is None: raise ValueError("Empty value passed for parameter 'privileges'") if uids is None and body is None: raise ValueError("Empty value passed for parameter 'uids'") __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_has_privileges" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if privileges is not None: __body["privileges"] = privileges if uids is not None: __body["uids"] = uids __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.has_privileges_user_profile", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("id", "ids", "name", "owner", "realm_name", "username"), ) def invalidate_api_key( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, id: t.Optional[str] = None, ids: t.Optional[t.Sequence[str]] = None, name: t.Optional[str] = None, owner: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm_name: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Invalidate API keys.

This API invalidates API keys created by the create API key or grant API key APIs. Invalidated API keys fail authentication, but they can still be viewed using the get API key information and query API key information APIs, for at least the configured retention period, until they are automatically deleted.

To use this API, you must have at least the manage_security, manage_api_key, or manage_own_api_key cluster privileges. The manage_security privilege allows deleting any API key, including both REST and cross cluster API keys. The manage_api_key privilege allows deleting any REST API key, but not cross cluster API keys. The manage_own_api_key privilege only allows deleting REST API keys that are owned by the user. In addition, with the manage_own_api_key privilege, an invalidation request must be issued in one of the three formats:

  • Set the parameter owner=true.
  • Or, set both username and realm_name to match the user's identity.
  • Or, if the request is issued by an API key, that is to say an API key invalidates itself, specify its ID in the ids field.
``_ :param id: :param ids: A list of API key ids. This parameter cannot be used with any of `name`, `realm_name`, or `username`. :param name: An API key name. This parameter cannot be used with any of `ids`, `realm_name` or `username`. :param owner: Query API keys owned by the currently authenticated user. The `realm_name` or `username` parameters cannot be specified when this parameter is set to `true` as they are assumed to be the currently authenticated ones. NOTE: At least one of `ids`, `name`, `username`, and `realm_name` must be specified if `owner` is `false`. :param realm_name: The name of an authentication realm. This parameter cannot be used with either `ids` or `name`, or when `owner` flag is set to `true`. :param username: The username of a user. This parameter cannot be used with either `ids` or `name` or when `owner` flag is set to `true`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if id is not None: __body["id"] = id if ids is not None: __body["ids"] = ids if name is not None: __body["name"] = name if owner is not None: __body["owner"] = owner if realm_name is not None: __body["realm_name"] = realm_name if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.invalidate_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("realm_name", "refresh_token", "token", "username"), ) def invalidate_token( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm_name: t.Optional[str] = None, refresh_token: t.Optional[str] = None, token: t.Optional[str] = None, username: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Invalidate a token.

The access tokens returned by the get token API have a finite period of time for which they are valid. After that time period, they can no longer be used. The time period is defined by the xpack.security.authc.token.timeout setting.

The refresh tokens returned by the get token API are only valid for 24 hours. They can also be used exactly once. If you want to invalidate one or more access or refresh tokens immediately, use this invalidate token API.

NOTE: While all parameters are optional, at least one of them is required. More specifically, either the token or the refresh_token parameter is required. If neither of those two is specified, then realm_name and/or username need to be specified.
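A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client and the token value is a placeholder::

    # Invalidate a single refresh token immediately.
    resp = es.security.invalidate_token(refresh_token="<refresh-token>")
    print(resp["invalidated_tokens"], resp["error_count"])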

``_ :param realm_name: The name of an authentication realm. This parameter cannot be used with either `refresh_token` or `token`. :param refresh_token: A refresh token. This parameter cannot be used if any of `token`, `realm_name`, or `username` are used. :param token: An access token. This parameter cannot be used if any of `refresh_token`, `realm_name`, or `username` are used. :param username: The username of a user. This parameter cannot be used with either `refresh_token` or `token`. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oauth2/token" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if realm_name is not None: __body["realm_name"] = realm_name if refresh_token is not None: __body["refresh_token"] = refresh_token if token is not None: __body["token"] = token if username is not None: __body["username"] = username __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.invalidate_token", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("nonce", "redirect_uri", "state", "realm"), ) def oidc_authenticate( self, *, nonce: t.Optional[str] = None, redirect_uri: t.Optional[str] = None, state: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Authenticate OpenID Connect.

Exchange an OpenID Connect authentication response message for an Elasticsearch internal access token and refresh token that can be subsequently used for authentication.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
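A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client; the realm name and the values carried over from the prepare step are placeholders::

    resp = es.security.oidc_authenticate(
        realm="oidc1",
        redirect_uri="https://app.example.com/cb?code=...&state=...",
        state="<state-from-prepare>",
        nonce="<nonce-from-prepare>",
    )
    print(resp["access_token"], resp["refresh_token"])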

``_ :param nonce: Associate a client session with an ID token and mitigate replay attacks. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. :param redirect_uri: The URL to which the OpenID Connect Provider redirected the User Agent in response to an authentication request after a successful authentication. This URL must be provided as-is (URL encoded), taken from the body of the response or as the value of a location header in the response from the OpenID Connect Provider. :param state: Maintain state between the authentication request and the response. This value needs to be the same as the one that was provided to the `/_security/oidc/prepare` API or the one that was generated by Elasticsearch and included in the response to that call. :param realm: The name of the OpenID Connect realm. This property is useful in cases where multiple realms are defined. """ if nonce is None and body is None: raise ValueError("Empty value passed for parameter 'nonce'") if redirect_uri is None and body is None: raise ValueError("Empty value passed for parameter 'redirect_uri'") if state is None and body is None: raise ValueError("Empty value passed for parameter 'state'") __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/authenticate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if nonce is not None: __body["nonce"] = nonce if redirect_uri is not None: __body["redirect_uri"] = redirect_uri if state is not None: __body["state"] = state if realm is not None: __body["realm"] = realm __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.oidc_authenticate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("token", "refresh_token"), ) def oidc_logout( self, *, token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh_token: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Logout of OpenID Connect.

Invalidate an access token and a refresh token that were generated as a response to the /_security/oidc/authenticate API.

If the OpenID Connect authentication realm in Elasticsearch is configured accordingly, the response to this call will contain a URI pointing to the end session endpoint of the OpenID Connect Provider in order to perform single logout.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
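A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client and both token values are placeholders::

    resp = es.security.oidc_logout(
        token="<access-token>",
        refresh_token="<refresh-token>",
    )
    print(resp["redirect"])  # end session endpoint of the OP, if configured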

``_ :param token: The access token to be invalidated. :param refresh_token: The refresh token to be invalidated. """ if token is None and body is None: raise ValueError("Empty value passed for parameter 'token'") __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/logout" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if token is not None: __body["token"] = token if refresh_token is not None: __body["refresh_token"] = refresh_token __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.oidc_logout", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("iss", "login_hint", "nonce", "realm", "state"), ) def oidc_prepare_authentication( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, iss: t.Optional[str] = None, login_hint: t.Optional[str] = None, nonce: t.Optional[str] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, state: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Prepare OpenID connect authentication.

Create an OAuth 2.0 authentication request as a URL string based on the configuration of the OpenID Connect authentication realm in Elasticsearch.

The response of this API is a URL pointing to the Authorization Endpoint of the configured OpenID Connect Provider, which can be used to redirect the browser of the user in order to continue the authentication process.

Elasticsearch exposes all the necessary OpenID Connect related functionality with the OpenID Connect APIs. These APIs are used internally by Kibana in order to provide OpenID Connect based authentication, but can also be used by other, custom web applications or other clients.
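A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client and ``oidc1`` is a placeholder realm name::

    resp = es.security.oidc_prepare_authentication(realm="oidc1")
    # The redirect URL for the user's browser, plus the generated state and nonce.
    print(resp["redirect"], resp["state"], resp["nonce"])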

``_ :param iss: In the case of a third party initiated single sign on, this is the issuer identifier for the OP that the RP is to send the authentication request to. It cannot be specified when *realm* is specified. One of *realm* or *iss* is required. :param login_hint: In the case of a third party initiated single sign on, it is a string value that is included in the authentication request as the *login_hint* parameter. This parameter is not valid when *realm* is specified. :param nonce: The value used to associate a client session with an ID token and to mitigate replay attacks. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. :param realm: The name of the OpenID Connect realm in Elasticsearch the configuration of which should be used in order to generate the authentication request. It cannot be specified when *iss* is specified. One of *realm* or *iss* is required. :param state: The value used to maintain state between the authentication request and the response, typically used as a Cross-Site Request Forgery mitigation. If the caller of the API does not provide a value, Elasticsearch will generate one with sufficient entropy and return it in the response. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/oidc/prepare" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if iss is not None: __body["iss"] = iss if login_hint is not None: __body["login_hint"] = login_hint if nonce is not None: __body["nonce"] = nonce if realm is not None: __body["realm"] = realm if state is not None: __body["state"] = state __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.oidc_prepare_authentication", path_parts=__path_parts, ) @_rewrite_parameters( body_name="privileges", ) def put_privileges( self, *, privileges: t.Optional[ t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]] ] = None, body: t.Optional[t.Mapping[str, t.Mapping[str, t.Mapping[str, t.Any]]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update application privileges.

To use this API, you must have one of the following privileges:

  • The manage_security cluster privilege (or a greater privilege such as all).
  • The "Manage Application Privileges" global privilege for the application being referenced in the request.

Application names are formed from a prefix and an optional suffix, which must conform to the following rules:

  • The prefix must begin with a lowercase ASCII letter.
  • The prefix must contain only ASCII letters or digits.
  • The prefix must be at least 3 characters long.
  • If the suffix exists, it must begin with either a dash - or _.
  • The suffix cannot contain any of the following characters: \\, /, *, ?, ", <, >, |, ,.
  • No part of the name can contain whitespace.

Privilege names must begin with a lowercase ASCII letter and must contain only ASCII letters and digits along with the characters _, -, and ..

Action names can contain any number of printable ASCII characters and must contain at least one of the following characters: /, *, :.
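A minimal sketch that follows the naming rules above, assuming ``es`` is a connected ``Elasticsearch`` client and ``myapp`` is a placeholder application name::

    resp = es.security.put_privileges(
        privileges={
            "myapp": {
                "read": {"actions": ["data:read/*", "action:login"]},
            }
        }
    )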

``_ :param privileges: :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. """ if privileges is None and body is None: raise ValueError( "Empty value passed for parameters 'privileges' and 'body', one of them should be set." ) elif privileges is not None and body is not None: raise ValueError("Cannot set both 'privileges' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_security/privilege" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __body = privileges if privileges is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_privileges", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "applications", "cluster", "description", "global_", "indices", "metadata", "remote_cluster", "remote_indices", "run_as", "transient_metadata", ), parameter_aliases={"global": "global_"}, ) def put_role( self, *, name: str, applications: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, cluster: t.Optional[ t.Sequence[ t.Union[ str, t.Literal[ "all", "cancel_task", "create_snapshot", "cross_cluster_replication", "cross_cluster_search", "delegate_pki", "grant_api_key", "manage", "manage_api_key", "manage_autoscaling", "manage_behavioral_analytics", "manage_ccr", "manage_data_frame_transforms", "manage_data_stream_global_retention", "manage_enrich", "manage_esql", "manage_ilm", "manage_index_templates", "manage_inference", "manage_ingest_pipelines", "manage_logstash_pipelines", "manage_ml", "manage_oidc", "manage_own_api_key", "manage_pipeline", "manage_rollup", "manage_saml", "manage_search_application", "manage_search_query_rules", "manage_search_synonyms", "manage_security", "manage_service_account", "manage_slm", "manage_token", "manage_transform", "manage_user_profile", "manage_watcher", "monitor", "monitor_data_frame_transforms", "monitor_data_stream_global_retention", "monitor_enrich", "monitor_esql", "monitor_inference", "monitor_ml", "monitor_rollup", "monitor_snapshot", "monitor_stats", "monitor_text_structure", "monitor_transform", "monitor_watcher", "none", "post_behavioral_analytics_event", "read_ccr", "read_fleet_secrets", "read_ilm", "read_pipeline", "read_security", "read_slm", "transport_client", "write_connector_secrets", "write_fleet_secrets", ], ] ] ] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, global_: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, remote_cluster: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, remote_indices: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, run_as: t.Optional[t.Sequence[str]] = None, transient_metadata: 
t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update roles.

The role management APIs are generally the preferred way to manage roles in the native realm, rather than using file-based role management. The create or update roles API cannot update roles that are defined in roles files. File-based role management is not available in Elastic Serverless.
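A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client; the role name and index pattern are placeholders::

    resp = es.security.put_role(
        name="logs_reader",
        cluster=["monitor"],
        indices=[{"names": ["logs-*"], "privileges": ["read"]}],
    )
    print(resp["role"]["created"])  # False when an existing role was updated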

``_ :param name: The name of the role that is being created or updated. On Elasticsearch Serverless, the role name must begin with a letter or digit and can only contain letters, digits and the characters '_', '-', and '.'. Each role must have a unique name, as this will serve as the identifier for that role. :param applications: A list of application privilege entries. :param cluster: A list of cluster privileges. These privileges define the cluster-level actions for users with this role. :param description: Optional description of the role descriptor :param global_: An object defining global privileges. A global privilege is a form of cluster privilege that is request-aware. Support for global privileges is currently limited to the management of application privileges. :param indices: A list of indices permissions entries. :param metadata: Optional metadata. Within the metadata object, keys that begin with an underscore (`_`) are reserved for system use. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param remote_cluster: A list of remote cluster permissions entries. :param remote_indices: A list of remote indices permissions entries. NOTE: Remote indices are effective for remote clusters configured with the API key based model. They have no effect for remote clusters configured with the certificate based model. :param run_as: A list of users that the owners of this role can impersonate. *Note*: in Serverless, the run-as feature is disabled. For API compatibility, you can still specify an empty `run_as` field, but a non-empty list will be rejected. :param transient_metadata: Indicates roles that might be incompatible with the current cluster license, specifically roles with document and field level security. When the cluster license doesn’t allow certain features for a given role, this parameter is updated dynamically to list the incompatible features. If `enabled` is `false`, the role is ignored, but is still listed in the response from the authenticate API. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if applications is not None: __body["applications"] = applications if cluster is not None: __body["cluster"] = cluster if description is not None: __body["description"] = description if global_ is not None: __body["global"] = global_ if indices is not None: __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata if remote_cluster is not None: __body["remote_cluster"] = remote_cluster if remote_indices is not None: __body["remote_indices"] = remote_indices if run_as is not None: __body["run_as"] = run_as if transient_metadata is not None: __body["transient_metadata"] = transient_metadata __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "enabled", "metadata", "role_templates", "roles", "rules", "run_as", ), ) def put_role_mapping( self, *, name: str, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, role_templates: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, roles: t.Optional[t.Sequence[str]] = None, rules: t.Optional[t.Mapping[str, t.Any]] = None, run_as: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update role mappings.

Role mappings define which roles are assigned to each user. Each mapping has rules that identify users and a list of roles that are granted to those users. The role mapping APIs are generally the preferred way to manage role mappings rather than using role mapping files. The create or update role mappings API cannot update role mappings that are defined in role mapping files.

NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using the create or update roles API or roles files.

Role templates

The most common use for role mappings is to create a mapping from a known value on the user to a fixed role name. For example, all users in the cn=admin,dc=example,dc=com LDAP group should be given the superuser role in Elasticsearch. The roles field is used for this purpose.

For more complex needs, it is possible to use Mustache templates to dynamically determine the names of the roles that should be granted to the user. The role_templates field is used for this purpose.

NOTE: To use role templates successfully, the relevant scripting feature must be enabled. Otherwise, all attempts to create a role mapping with role templates fail.

All of the user fields that are available in the role mapping rules are also available in the role templates. Thus it is possible to assign a user to a role that reflects their username, their groups, or the name of the realm to which they authenticated.

By default a template is evaluated to produce a single string that is the name of the role which should be assigned to the user. If the format of the template is set to "json" then the template is expected to produce a JSON string or an array of JSON strings for the role names.
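A minimal sketch of the fixed-role-name case described above, assuming ``es`` is a connected ``Elasticsearch`` client and the mapping name is a placeholder::

    resp = es.security.put_role_mapping(
        name="ldap_admins",
        roles=["superuser"],
        enabled=True,
        rules={"field": {"groups": "cn=admin,dc=example,dc=com"}},
    )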

``_ :param name: The distinct name that identifies the role mapping. The name is used solely as an identifier to facilitate interaction via the API; it does not affect the behavior of the mapping in any way. :param enabled: Mappings that have `enabled` set to `false` are ignored when role mapping is performed. :param metadata: Additional metadata that helps define which roles are assigned to each user. Within the metadata object, keys beginning with `_` are reserved for system usage. :param refresh: If `true` (the default) then refresh the affected shards to make this operation visible to search, if `wait_for` then wait for a refresh to make this operation visible to search, if `false` then do nothing with refreshes. :param role_templates: A list of Mustache templates that will be evaluated to determine the role names that should be granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. :param roles: A list of role names that are granted to the users that match the role mapping rules. Exactly one of `roles` or `role_templates` must be specified. :param rules: The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. :param run_as: """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"name": _quote(name)} __path = f'/_security/role_mapping/{__path_parts["name"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if enabled is not None: __body["enabled"] = enabled if metadata is not None: __body["metadata"] = metadata if role_templates is not None: __body["role_templates"] = role_templates if roles is not None: __body["roles"] = roles if rules is not None: __body["rules"] = rules if run_as is not None: __body["run_as"] = run_as __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_role_mapping", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "email", "enabled", "full_name", "metadata", "password", "password_hash", "roles", ), ) def put_user( self, *, username: str, email: t.Optional[t.Union[None, str]] = None, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, full_name: t.Optional[t.Union[None, str]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, password: t.Optional[str] = None, password_hash: t.Optional[str] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, roles: t.Optional[t.Sequence[str]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update users.

Add and update users in the native realm. A password is required for adding a new user but is optional when updating an existing user. To change a user's password without updating any other fields, use the change password API.
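A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client; the username, password, and role are placeholders::

    resp = es.security.put_user(
        username="jacknich",
        password="l0ng-r4nd0m-p@ssw0rd",
        roles=["analyst"],
        full_name="Jack Nicholson",
    )
    print(resp["created"])  # False when an existing user was updated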

``_ :param username: An identifier for the user. NOTE: Usernames must be at least 1 and no more than 507 characters. They can contain alphanumeric characters (a-z, A-Z, 0-9), spaces, punctuation, and printable symbols in the Basic Latin (ASCII) block. Leading or trailing whitespace is not allowed. :param email: The email of the user. :param enabled: Specifies whether the user is enabled. :param full_name: The full name of the user. :param metadata: Arbitrary metadata that you want to associate with the user. :param password: The user's password. Passwords must be at least 6 characters long. When adding a user, one of `password` or `password_hash` is required. When updating an existing user, the password is optional, so that other fields on the user (such as their roles) may be updated without modifying the user's password :param password_hash: A hash of the user's password. This must be produced using the same hashing algorithm as has been configured for password storage. For more details, see the explanation of the `xpack.security.authc.password_hashing.algorithm` setting in the user cache and password hash algorithm documentation. Using this parameter allows the client to pre-hash the password for performance and/or confidentiality reasons. The `password` parameter and the `password_hash` parameter cannot be used in the same request. :param refresh: Valid values are `true`, `false`, and `wait_for`. These values have the same meaning as in the index API, but the default value for this API is true. :param roles: A set of roles the user has. The roles determine the user's access permissions. To create a user without any roles, specify an empty list (`[]`). """ if username in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'username'") __path_parts: t.Dict[str, str] = {"username": _quote(username)} __path = f'/_security/user/{__path_parts["username"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if email is not None: __body["email"] = email if enabled is not None: __body["enabled"] = enabled if full_name is not None: __body["full_name"] = full_name if metadata is not None: __body["metadata"] = metadata if password is not None: __body["password"] = password if password_hash is not None: __body["password_hash"] = password_hash if roles is not None: __body["roles"] = roles __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.put_user", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "aggregations", "aggs", "from_", "query", "search_after", "size", "sort", ), parameter_aliases={"from": "from_"}, ) def query_api_keys( self, *, aggregations: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, aggs: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, 
float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, typed_keys: t.Optional[bool] = None, with_limited_by: t.Optional[bool] = None, with_profile_uid: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find API keys with a query.

Get a paginated list of API keys and their information. You can optionally filter the results with a query.

To use this API, you must have at least the manage_own_api_key or the read_security cluster privileges. If you have only the manage_own_api_key privilege, this API returns only the API keys that you own. If you have the read_security, manage_api_key, or greater privileges (including manage_security), this API returns all API keys regardless of ownership. Refer to the linked documentation for examples of how to find API keys:
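A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client and the key name is a placeholder::

    resp = es.security.query_api_keys(
        query={"term": {"name": {"value": "application-key-1"}}},
        size=10,
    )
    print(resp["total"], [k["id"] for k in resp["api_keys"]])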

``_ :param aggregations: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. :param aggs: Any aggregations to run over the corpus of returned API keys. Aggregations and queries work together. Aggregations are computed only on the API keys that match the query. This supports only a subset of aggregation types, namely: `terms`, `range`, `date_range`, `missing`, `cardinality`, `value_count`, `composite`, `filter`, and `filters`. Additionally, aggregations only run over the same subset of fields that query works with. :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which API keys to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following public information associated with an API key: `id`, `type`, `name`, `creation`, `expiration`, `invalidated`, `invalidation`, `username`, `realm`, and `metadata`. NOTE: The queryable string values associated with API keys are internally mapped as keywords. Consequently, if no `analyzer` parameter is specified for a `match` query, then the provided match query string is interpreted as a single keyword value. Such a match query is hence equivalent to a `term` query. :param search_after: The search after definition. :param size: The number of hits to return. It must not be negative. The `size` parameter can be set to `0`, in which case no API key matches are returned, only the aggregation results. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param sort: The sort definition. Other than `id`, all public fields of an API key are eligible for sorting. In addition, sort can also be applied to the `_doc` field to sort by index order. :param typed_keys: Determines whether aggregation names are prefixed by their respective types in the response. :param with_limited_by: Return the snapshot of the owner user's role descriptors associated with the API key. An API key's actual permission is the intersection of its assigned role descriptors and the owner user's role descriptors (effectively limited by it). An API key cannot retrieve any API key’s limited-by role descriptors (including itself) unless it has `manage_api_key` or higher privileges. :param with_profile_uid: Determines whether to also retrieve the profile UID for the API key owner principal. If it exists, the profile UID is returned under the `profile_uid` response field for each API key. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/api_key" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. 
if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if typed_keys is not None: __query["typed_keys"] = typed_keys if with_limited_by is not None: __query["with_limited_by"] = with_limited_by if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid if not __body: if aggregations is not None: __body["aggregations"] = aggregations if aggs is not None: __body["aggs"] = aggs if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.query_api_keys", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("from_", "query", "search_after", "size", "sort"), parameter_aliases={"from": "from_"}, ) def query_role( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find roles with a query.

Get roles in a paginated manner. The role management APIs are generally the preferred way to manage roles, rather than using file-based role management. The query roles API does not retrieve roles that are defined in roles files, nor built-in ones. You can optionally filter the results with a query. Also, the results can be paginated and sorted.
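A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client; the query text is a placeholder::

    resp = es.security.query_role(
        query={"match": {"description": {"query": "user access"}}},
        size=10,
    )
    print(resp["total"], [r["name"] for r in resp["roles"]])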

``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which roles to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with roles: `name`, `description`, `metadata`, `applications.application`, `applications.privileges`, and `applications.resources`. :param search_after: The search after definition. :param size: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param sort: The sort definition. You can sort on any of the queryable role fields listed above, such as `name`. In addition, sort can also be applied to the `_doc` field to sort by index order. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/role" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.query_role", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("from_", "query", "search_after", "size", "sort"), parameter_aliases={"from": "from_"}, ) def query_user( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, with_profile_uid: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find users with a query.

Get information for users in a paginated manner. You can optionally filter the results with a query.

NOTE: Unlike the get user API, built-in users are excluded from the result. This API is only for native users.
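A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client::

    resp = es.security.query_user(
        query={"term": {"enabled": True}},
        with_profile_uid=True,
    )
    print(resp["total"])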

``_ :param from_: The starting document offset. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param query: A query to filter which users to return. If the query parameter is missing, it is equivalent to a `match_all` query. The query supports a subset of query types, including `match_all`, `bool`, `term`, `terms`, `match`, `ids`, `prefix`, `wildcard`, `exists`, `range`, and `simple_query_string`. You can query the following information associated with user: `username`, `roles`, `enabled`, `full_name`, and `email`. :param search_after: The search after definition :param size: The number of hits to return. It must not be negative. By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the `search_after` parameter. :param sort: The sort definition. Fields eligible for sorting are: `username`, `roles`, `enabled`. In addition, sort can also be applied to the `_doc` field to sort by index order. :param with_profile_uid: Determines whether to retrieve the user profile UID, if it exists, for the users. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/_query/user" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if with_profile_uid is not None: __query["with_profile_uid"] = with_profile_uid if not __body: if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.query_user", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("content", "ids", "realm"), ) def saml_authenticate( self, *, content: t.Optional[str] = None, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Authenticate SAML.

Submit a SAML response message to Elasticsearch for consumption.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The SAML message that is submitted can be:

  • A response to a SAML authentication request that was previously created using the SAML prepare authentication API.
  • An unsolicited SAML message in the case of an IdP-initiated single sign-on (SSO) flow.

In either case, the SAML message needs to be a base64 encoded XML document with a root element of <Response>.

After successful validation, Elasticsearch responds with an Elasticsearch internal access token and refresh token that can be subsequently used for authentication. This API endpoint essentially exchanges SAML responses that indicate successful authentication in the IdP for Elasticsearch access and refresh tokens, which can be used for authentication against Elasticsearch.
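A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client; both values are placeholders taken from the browser and from the prepare step::

    resp = es.security.saml_authenticate(
        content="<base64-encoded-SAML-response>",
        ids=["<authn-request-id>"],
    )
    print(resp["access_token"], resp["refresh_token"])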

``_ :param content: The SAML response as it was sent by the user's browser, usually a Base64 encoded XML document. :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the realm that should authenticate the SAML response. Useful in cases where many SAML realms are defined. """ if content is None and body is None: raise ValueError("Empty value passed for parameter 'content'") if ids is None and body is None: raise ValueError("Empty value passed for parameter 'ids'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/authenticate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if content is not None: __body["content"] = content if ids is not None: __body["ids"] = ids if realm is not None: __body["realm"] = realm __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_authenticate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("ids", "realm", "content", "query_string"), ) def saml_complete_logout( self, *, ids: t.Optional[t.Union[str, t.Sequence[str]]] = None, realm: t.Optional[str] = None, content: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query_string: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Logout of SAML completely.

Verify the logout response sent from the SAML IdP.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The SAML IdP may send a logout response back to the SP after handling the SP-initiated SAML Single Logout. This API verifies the response by ensuring the content is relevant and validating its signature. An empty response is returned if the verification process is successful. The response can be sent by the IdP with either the HTTP-Redirect or the HTTP-Post binding. The caller of this API must prepare the request accordingly so that this API can handle either of them.
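A minimal sketch of the HTTP-Redirect case, assuming ``es`` is a connected ``Elasticsearch`` client; the realm, request ID, and query string are placeholders::

    # An empty response body means the logout response was verified.
    es.security.saml_complete_logout(
        realm="saml1",
        ids=["<logout-request-id>"],
        query_string="SAMLResponse=...&SigAlg=...&Signature=...",
    )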

``_ :param ids: A JSON array with all the valid SAML Request Ids that the caller of the API has for the current user. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to verify the logout response. :param content: If the SAML IdP sends the logout response with the HTTP-Post binding, this field must be set to the value of the SAMLResponse form parameter from the logout response. :param query_string: If the SAML IdP sends the logout response with the HTTP-Redirect binding, this field must be set to the query string of the redirect URI. """ if ids is None and body is None: raise ValueError("Empty value passed for parameter 'ids'") if realm is None and body is None: raise ValueError("Empty value passed for parameter 'realm'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/complete_logout" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if ids is not None: __body["ids"] = ids if realm is not None: __body["realm"] = realm if content is not None: __body["content"] = content if query_string is not None: __body["query_string"] = query_string __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_complete_logout", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query_string", "acs", "realm"), ) def saml_invalidate( self, *, query_string: t.Optional[str] = None, acs: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Invalidate SAML.

Submit a SAML LogoutRequest message to Elasticsearch for consumption.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

The logout request comes from the SAML IdP during an IdP initiated Single Logout. The custom web application can use this API to have Elasticsearch process the LogoutRequest. After successful validation of the request, Elasticsearch invalidates the access token and refresh token that corresponds to that specific SAML principal and provides a URL that contains a SAML LogoutResponse message. Thus the user can be redirected back to their IdP.
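A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client; the query string and realm are placeholders::

    resp = es.security.saml_invalidate(
        query_string="SAMLRequest=...&SigAlg=...&Signature=...",
        realm="saml1",
    )
    print(resp["redirect"])  # carries the SAML LogoutResponse for the IdP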

``_ :param query_string: The query part of the URL that the user was redirected to by the SAML IdP to initiate the Single Logout. This query should include a single parameter named `SAMLRequest` that contains a SAML logout request that is deflated and Base64 encoded. If the SAML IdP has signed the logout request, the URL should include two extra parameters named `SigAlg` and `Signature` that contain the algorithm used for the signature and the signature value itself. In order for Elasticsearch to be able to verify the IdP's signature, the value of the `query_string` field must be an exact match to the string provided by the browser. The client application must not attempt to parse or process the string in any way. :param acs: The Assertion Consumer Service URL that matches the one of the SAML realm in Elasticsearch that should be used. You must specify either this parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch whose configuration should be used. You must specify either this parameter or the `acs` parameter. """ if query_string is None and body is None: raise ValueError("Empty value passed for parameter 'query_string'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/invalidate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query_string is not None: __body["query_string"] = query_string if acs is not None: __body["acs"] = acs if realm is not None: __body["realm"] = realm __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_invalidate", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("token", "refresh_token"), ) def saml_logout( self, *, token: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh_token: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Logout of SAML.

Submit a request to invalidate an access token and refresh token.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

This API invalidates the tokens that were generated for a user by the SAML authenticate API. If the SAML realm in Elasticsearch is configured accordingly and the SAML IdP supports this, the Elasticsearch response contains a URL to redirect the user to the IdP that contains a SAML logout request (starting an SP-initiated SAML Single Logout).
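A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client and both token values are placeholders::

    resp = es.security.saml_logout(
        token="<access-token>",
        refresh_token="<refresh-token>",
    )
    print(resp["redirect"])  # IdP URL carrying the SAML logout request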

``_ :param token: The access token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent token that was received after refreshing the original one by using a `refresh_token`. :param refresh_token: The refresh token that was returned as a response to calling the SAML authenticate API. Alternatively, the most recent refresh token that was received after refreshing the original access token. """ if token is None and body is None: raise ValueError("Empty value passed for parameter 'token'") __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/logout" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if token is not None: __body["token"] = token if refresh_token is not None: __body["refresh_token"] = refresh_token __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_logout", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("acs", "realm", "relay_state"), ) def saml_prepare_authentication( self, *, acs: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, realm: t.Optional[str] = None, relay_state: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Prepare SAML authentication.

Create a SAML authentication request (<AuthnRequest>) as a URL string based on the configuration of the respective SAML realm in Elasticsearch.

NOTE: This API is intended for use by custom web applications other than Kibana. If you are using Kibana, refer to the documentation for configuring SAML single-sign-on on the Elastic Stack.

This API returns a URL pointing to the SAML Identity Provider. You can use the URL to redirect the browser of the user in order to continue the authentication process. The URL includes a single parameter named SAMLRequest, which contains a SAML Authentication request that is deflated and Base64 encoded. If the configuration dictates that SAML authentication requests should be signed, the URL has two extra parameters named SigAlg and Signature. These parameters contain the algorithm used for the signature and the signature value itself. It also returns a random string that uniquely identifies this SAML Authentication request. The caller of this API needs to store this identifier as it needs to be used in a following step of the authentication process.
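A minimal sketch, assuming ``es`` is a connected ``Elasticsearch`` client and ``saml1`` is a placeholder realm name::

    resp = es.security.saml_prepare_authentication(realm="saml1")
    print(resp["id"])        # store this for the later authenticate call
    print(resp["redirect"])  # send the user's browser to this URL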

``_ :param acs: The Assertion Consumer Service URL that matches the one of the SAML realms in Elasticsearch. The realm is used to generate the authentication request. You must specify either this parameter or the `realm` parameter. :param realm: The name of the SAML realm in Elasticsearch for which the configuration is used to generate the authentication request. You must specify either this parameter or the `acs` parameter. :param relay_state: A string that will be included in the redirect URL that this API returns as the `RelayState` query parameter. If the Authentication Request is signed, this value is used as part of the signature computation. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/saml/prepare" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if acs is not None: __body["acs"] = acs if realm is not None: __body["realm"] = realm if relay_state is not None: __body["relay_state"] = relay_state __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.saml_prepare_authentication", path_parts=__path_parts, ) @_rewrite_parameters() def saml_service_provider_metadata( self, *, realm_name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create SAML service provider metadata.

Generate SAML metadata for a SAML 2.0 Service Provider.

The SAML 2.0 specification provides a mechanism for Service Providers to describe their capabilities and configuration using a metadata file. This API generates Service Provider metadata based on the configuration of a SAML realm in Elasticsearch.
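
For example (a sketch; assumes a configured `Elasticsearch` client named `client`, a hypothetical realm `saml1`, and a `metadata` response field holding the XML):

    resp = client.security.saml_service_provider_metadata(realm_name="saml1")
    print(resp["metadata"])  # SAML 2.0 Service Provider metadata as XML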

``_ :param realm_name: The name of the SAML realm in Elasticsearch. """ if realm_name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'realm_name'") __path_parts: t.Dict[str, str] = {"realm_name": _quote(realm_name)} __path = f'/_security/saml/metadata/{__path_parts["realm_name"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="security.saml_service_provider_metadata", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("data", "hint", "name", "size"), ) def suggest_user_profiles( self, *, data: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, hint: t.Optional[t.Mapping[str, t.Any]] = None, human: t.Optional[bool] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Suggest a user profile.

Get suggestions for user profiles that match specified search criteria.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.
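
For exploration or troubleshooting, a call might look like this (sketch; `client` is an assumed `Elasticsearch` instance, and the `profiles`/`uid` response field names are assumptions):

    resp = client.security.suggest_user_profiles(name="jack", size=5)
    for profile in resp["profiles"]:
        print(profile["uid"])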

``_ :param data: A comma-separated list of filters for the `data` field of the profile document. To return all content use `data=*`. To return a subset of content, use `data=` to retrieve content nested under the specified ``. By default, the API returns no `data` content. It is an error to specify `data` as both the query parameter and the request body field. :param hint: Extra search criteria to improve relevance of the suggestion result. Profiles matching the specified hint are ranked higher in the response. Profiles not matching the hint aren't excluded from the response as long as the profile matches the `name` field query. :param name: A query string used to match name-related fields in user profile documents. Name-related fields are the user's `username`, `full_name`, and `email`. :param size: The number of profiles to return. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/profile/_suggest" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if data is not None: __body["data"] = data if hint is not None: __body["hint"] = hint if name is not None: __body["name"] = name if size is not None: __body["size"] = size if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.suggest_user_profiles", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("expiration", "metadata", "role_descriptors"), ) def update_api_key( self, *, id: str, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, role_descriptors: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update an API key.

Update attributes of an existing API key. This API supports updates to an API key's access scope, expiration, and metadata.

To use this API, you must have at least the manage_own_api_key cluster privilege. Users can only update API keys that they created or that were granted to them. To update another user’s API key, use the run_as feature to submit a request on behalf of another user.

IMPORTANT: It's not possible to use an API key as the authentication credential for this API. The owner user’s credentials are required.

Use this API to update API keys created by the create API key or grant API Key APIs. If you need to apply the same update to many API keys, you can use the bulk update API keys API to reduce overhead. It's not possible to update expired API keys or API keys that have been invalidated by the invalidate API key API.

The access scope of an API key is derived from the role_descriptors you specify in the request and a snapshot of the owner user's permissions at the time of the request. The snapshot of the owner's permissions is updated automatically on every call.

IMPORTANT: If you don't specify role_descriptors in the request, a call to this API might still change the API key's access scope. This change can occur if the owner user's permissions have changed since the API key was created or last modified.
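
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance and the API key ID is hypothetical):

    client.security.update_api_key(
        id="VuaCfGcBCdbkQm-e5aOx",  # hypothetical API key ID
        role_descriptors={},        # empty object: remove assigned privileges
        metadata={"environment": "production"},
    )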

``_ :param id: The ID of the API key to update. :param expiration: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the expiration unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports a nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this value fully replaces the metadata previously associated with the API key. :param role_descriptors: The role descriptors to assign to this API key. The API key's effective permissions are an intersection of its assigned privileges and the point in time snapshot of permissions of the owner user. You can assign new privileges by specifying them in this parameter. To remove assigned privileges, you can supply an empty `role_descriptors` parameter, that is to say, an empty object `{}`. If an API key has no assigned privileges, it inherits the owner user's full permissions. The snapshot of the owner's permissions is always updated, whether you supply the `role_descriptors` parameter or not. The structure of a role descriptor is the same as the request for the create API keys API. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_security/api_key/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata if role_descriptors is not None: __body["role_descriptors"] = role_descriptors if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("access", "expiration", "metadata"), ) def update_cross_cluster_api_key( self, *, id: str, access: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, expiration: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update a cross-cluster API key.

Update the attributes of an existing cross-cluster API key, which is used for API key based remote cluster access.

To use this API, you must have at least the manage_security cluster privilege. Users can only update API keys that they created. To update another user's API key, use the run_as feature to submit a request on behalf of another user.

IMPORTANT: It's not possible to use an API key as the authentication credential for this API. To update an API key, the owner user's credentials are required.

It's not possible to update expired API keys, or API keys that have been invalidated by the invalidate API key API.

This API supports updates to an API key's access scope, metadata, and expiration. The owner user's information, such as the username and realm, is also updated automatically on every call.

NOTE: This API cannot update REST API keys, which should be updated by either the update API key or bulk update API keys API.

To learn more about how to use this API, refer to the Update cross-cluster API key API examples page.
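
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, the key ID is hypothetical, and the `access` structure follows the parameter description below):

    client.security.update_cross_cluster_api_key(
        id="my-cross-cluster-key-id",  # hypothetical
        access={"search": [{"names": ["logs-*"]}]},
        metadata={"team": "platform"},
    )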

``_ :param id: The ID of the cross-cluster API key to update. :param access: The access to be granted to this API key. The access is composed of permissions for cross cluster search and cross cluster replication. At least one of them must be specified. When specified, the new access assignment fully replaces the previously assigned access. :param expiration: The expiration time for the API key. By default, API keys never expire. This property can be omitted to leave the value unchanged. :param metadata: Arbitrary metadata that you want to associate with the API key. It supports nested data structure. Within the metadata object, keys beginning with `_` are reserved for system usage. When specified, this information fully replaces metadata previously associated with the API key. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if access is None and body is None: raise ValueError("Empty value passed for parameter 'access'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_security/cross_cluster/api_key/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if access is not None: __body["access"] = access if expiration is not None: __body["expiration"] = expiration if metadata is not None: __body["metadata"] = metadata __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_cross_cluster_api_key", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("security", "security_profile", "security_tokens"), parameter_aliases={ "security-profile": "security_profile", "security-tokens": "security_tokens", }, ) def update_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, security: t.Optional[t.Mapping[str, t.Any]] = None, security_profile: t.Optional[t.Mapping[str, t.Any]] = None, security_tokens: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update security index settings.

Update the user-configurable settings for the security internal index (.security and associated indices). Only a subset of the settings can be modified. This includes index.auto_expand_replicas and index.number_of_replicas.

NOTE: If index.auto_expand_replicas is set, index.number_of_replicas will be ignored during updates.

If a specific index is not in use on the system and settings are provided for it, the request will be rejected. This API does not yet support configuring the settings for indices before they are in use.
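
For example, to change the replica settings of the main security index (a sketch; assumes a configured `Elasticsearch` client named `client`):

    client.security.update_settings(
        security={"index.auto_expand_replicas": "0-all"},
    )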

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param security: Settings for the index used for most security configuration, including native realm users and roles configured with the API. :param security_profile: Settings for the index used to store profile information. :param security_tokens: Settings for the index used to store tokens. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_security/settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if security is not None: __body["security"] = security if security_profile is not None: __body["security-profile"] = security_profile if security_tokens is not None: __body["security-tokens"] = security_tokens __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_settings", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("data", "labels"), ) def update_user_profile_data( self, *, uid: str, data: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, labels: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[ t.Union[bool, str, t.Literal["false", "true", "wait_for"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update user profile data.

Update specific data for the user profile that is associated with a unique ID.

NOTE: The user profile feature is designed only for use by Kibana and Elastic's Observability, Enterprise Search, and Elastic Security solutions. Individual users and external applications should not call this API directly. Elastic reserves the right to change or remove this feature in future releases without prior notice.

To use this API, you must have one of the following privileges:

  • The manage_user_profile cluster privilege.
  • The update_profile_data global privilege for the namespaces that are referenced in the request.

This API updates the labels and data fields of an existing user profile document with JSON objects. New keys and their values are added to the profile document and conflicting keys are replaced by data that's included in the request.

For both labels and data, content is namespaced by the top-level fields. The update_profile_data global privilege grants privileges for updating only the allowed namespaces.
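
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, and the profile UID and namespace values are hypothetical):

    client.security.update_user_profile_data(
        uid="u_kd2JMqwUQwSCCOxMv7M1vw_0",  # hypothetical profile UID
        labels={"direction": "east"},
        data={"app1": {"theme": "default"}},
    )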

``_ :param uid: A unique identifier for the user profile. :param data: Non-searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the `data` object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). The data object is not searchable, but can be retrieved with the get user profile API. :param if_primary_term: Only perform the operation if the document has this primary term. :param if_seq_no: Only perform the operation if the document has this sequence number. :param labels: Searchable data that you want to associate with the user profile. This field supports a nested data structure. Within the labels object, top-level keys cannot begin with an underscore (`_`) or contain a period (`.`). :param refresh: If 'true', Elasticsearch refreshes the affected shards to make this operation visible to search. If 'wait_for', it waits for a refresh to make this operation visible to search. If 'false', nothing is done with refreshes. """ if uid in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'uid'") __path_parts: t.Dict[str, str] = {"uid": _quote(uid)} __path = f'/_security/profile/{__path_parts["uid"]}/_data' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if data is not None: __body["data"] = data if labels is not None: __body["labels"] = labels __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="security.update_user_profile_data", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/shutdown.py000066400000000000000000000316401506101734100253050ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class ShutdownClient(NamespacedClient): @_rewrite_parameters() def delete_node( self, *, node_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel node shutdown preparations. Remove a node from the shutdown list so it can resume normal operations. You must explicitly clear the shutdown request when a node rejoins the cluster or when a node has permanently left the cluster. Shutdown requests are never removed automatically by Elasticsearch.

NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If the operator privileges feature is enabled, you must be an operator to use this API.
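
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance and the node ID is hypothetical):

    client.shutdown.delete_node(node_id="USpTGYaBSIKbgSUJR2Z9lg")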

``_ :param node_id: The node ID of the node to be removed from the shutdown state. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") __path_parts: t.Dict[str, str] = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/shutdown' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="shutdown.delete_node", path_parts=__path_parts, ) @_rewrite_parameters() def get_node( self, *, node_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the shutdown status.

Get information about nodes that are ready to be shut down, have shutdown preparations still in progress, or have stalled. The API returns status information for each part of the shutdown process.

NOTE: This feature is designed for indirect use by Elasticsearch Service, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If the operator privileges feature is enabled, you must be an operator to use this API.
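
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, and the `nodes`, `node_id`, and `status` response field names are assumptions):

    resp = client.shutdown.get_node()
    for node in resp["nodes"]:
        print(node["node_id"], node["status"])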

``_ :param node_id: The node for which to retrieve the shutdown status. :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if node_id not in SKIP_IN_PATH: __path_parts = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/shutdown' else: __path_parts = {} __path = "/_nodes/shutdown" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="shutdown.get_node", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("reason", "type", "allocation_delay", "target_node_name"), ) def put_node( self, *, node_id: str, reason: t.Optional[str] = None, type: t.Optional[ t.Union[str, t.Literal["remove", "replace", "restart"]] ] = None, allocation_delay: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, pretty: t.Optional[bool] = None, target_node_name: t.Optional[str] = None, timeout: t.Optional[ t.Union[str, t.Literal["d", "h", "m", "micros", "ms", "nanos", "s"]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Prepare a node to be shut down.

NOTE: This feature is designed for indirect use by Elastic Cloud, Elastic Cloud Enterprise, and Elastic Cloud on Kubernetes. Direct use is not supported.

If you specify a node that is offline, it will be prepared for shutdown when it rejoins the cluster.

If the operator privileges feature is enabled, you must be an operator to use this API.

The API migrates ongoing tasks and index shards to other nodes as needed to prepare a node to be restarted or shut down and removed from the cluster. This ensures that Elasticsearch can be stopped safely with minimal disruption to the cluster.

You must specify the type of shutdown: restart, remove, or replace. If a node is already being prepared for shutdown, you can use this API to change the shutdown type.

IMPORTANT: This API does NOT terminate the Elasticsearch process. Monitor the node shutdown status to determine when it is safe to stop Elasticsearch.
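
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance and the node ID is hypothetical):

    client.shutdown.put_node(
        node_id="USpTGYaBSIKbgSUJR2Z9lg",  # hypothetical node ID
        type="restart",                    # restart, remove, or replace
        reason="scheduled maintenance",
        allocation_delay="20m",            # only valid for restart-type shutdowns
    )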

``_ :param node_id: The node identifier. This parameter is not validated against the cluster's active nodes. This enables you to register a node for shutdown while it is offline. No error is thrown if you specify an invalid node ID. :param reason: A human-readable reason that the node is being shut down. This field provides information for other cluster operators; it does not affect the shutdown process. :param type: Valid values are restart, remove, or replace. Use restart when you need to temporarily shut down a node to perform an upgrade, make configuration changes, or perform other maintenance. Because the node is expected to rejoin the cluster, data is not migrated off of the node. Use remove when you need to permanently remove a node from the cluster. The node is not marked ready for shutdown until data is migrated off of the node. Use replace to do a 1:1 replacement of a node with another node. Certain allocation decisions will be ignored (such as disk watermarks) in the interest of true replacement of the source node with the target node. During a replace-type shutdown, rollover and index creation may result in unassigned shards, and shrink may fail until the replacement is complete. :param allocation_delay: Only valid if type is restart. Controls how long Elasticsearch will wait for the node to restart and join the cluster before reassigning its shards to other nodes. This works the same as delaying allocation with the index.unassigned.node_left.delayed_timeout setting. If you specify both a restart allocation delay and an index-level allocation delay, the longer of the two is used. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param target_node_name: Only valid if type is replace. Specifies the name of the node that is replacing the node being shut down. Shards from the shut down node are only allowed to be allocated to the target node, and no other data will be allocated to the target node. During relocation of data certain allocation rules are ignored, such as disk watermarks or user attribute filtering rules. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error.
""" if node_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'node_id'") if reason is None and body is None: raise ValueError("Empty value passed for parameter 'reason'") if type is None and body is None: raise ValueError("Empty value passed for parameter 'type'") __path_parts: t.Dict[str, str] = {"node_id": _quote(node_id)} __path = f'/_nodes/{__path_parts["node_id"]}/shutdown' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if reason is not None: __body["reason"] = reason if type is not None: __body["type"] = type if allocation_delay is not None: __body["allocation_delay"] = allocation_delay if target_node_name is not None: __body["target_node_name"] = target_node_name __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="shutdown.put_node", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/simulate.py000066400000000000000000000162501506101734100252550ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SimulateClient(NamespacedClient): @_rewrite_parameters( body_fields=( "docs", "component_template_substitutions", "index_template_substitutions", "mapping_addition", "pipeline_substitutions", ), ) @_stability_warning(Stability.EXPERIMENTAL) def ingest( self, *, docs: t.Optional[t.Sequence[t.Mapping[str, t.Any]]] = None, index: t.Optional[str] = None, component_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_template_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, mapping_addition: t.Optional[t.Mapping[str, t.Any]] = None, pipeline: t.Optional[str] = None, pipeline_substitutions: t.Optional[ t.Mapping[str, t.Mapping[str, t.Any]] ] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Simulate data ingestion. Run ingest pipelines against a set of provided documents, optionally with substitute pipeline definitions, to simulate ingesting data into an index.

This API is meant to be used for troubleshooting or pipeline development, as it does not actually index any data into Elasticsearch.

The API runs the default and final pipeline for that index against a set of documents provided in the body of the request. If a pipeline contains a reroute processor, it follows that reroute processor to the new index, running that index's pipelines as well, the same way that a non-simulated ingest would. No data is indexed into Elasticsearch. Instead, the transformed document is returned, along with the list of pipelines that have been run and the name of the index where the document would have been indexed if this were not a simulation. The transformed document is validated against the mappings that would apply to this index, and any validation error is reported in the result.

This API differs from the simulate pipeline API in that you specify a single pipeline for that API, and it runs only that one pipeline. The simulate pipeline API is more useful for developing a single pipeline, while the simulate ingest API is more useful for troubleshooting the interaction of the various pipelines that get applied when ingesting into an index.

By default, the pipeline definitions that are currently in the system are used. However, you can supply substitute pipeline definitions in the body of the request. These will be used in place of the pipeline definitions that are already in the system. This can be used to replace existing pipeline definitions or to create new ones. The pipeline substitutions are used only within this request.
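
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, the index name is hypothetical, and note that this API is marked experimental in this client):

    resp = client.simulate.ingest(
        index="my-index",
        docs=[{"_id": "1", "_source": {"message": "hello"}}],
    )
    print(resp["docs"])  # transformed documents; field name is an assumption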

``_ :param docs: Sample documents to test in the pipeline. :param index: The index to simulate ingesting into. This value can be overridden by specifying an index on each document. If you specify this parameter in the request path, it is used for any documents that do not explicitly specify an index argument. :param component_template_substitutions: A map of component template names to substitute component template definition objects. :param index_template_substitutions: A map of index template names to substitute index template definition objects. :param mapping_addition: :param pipeline: The pipeline to use as the default pipeline. This value can be used to override the default pipeline of the index. :param pipeline_substitutions: Pipelines to test. If you don’t specify the `pipeline` request path parameter, this parameter is required. If you specify both this and the request path parameter, the API only uses the request path parameter. """ if docs is None and body is None: raise ValueError("Empty value passed for parameter 'docs'") __path_parts: t.Dict[str, str] if index not in SKIP_IN_PATH: __path_parts = {"index": _quote(index)} __path = f'/_ingest/{__path_parts["index"]}/_simulate' else: __path_parts = {} __path = "/_ingest/_simulate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pipeline is not None: __query["pipeline"] = pipeline if pretty is not None: __query["pretty"] = pretty if not __body: if docs is not None: __body["docs"] = docs if component_template_substitutions is not None: __body["component_template_substitutions"] = ( component_template_substitutions ) if index_template_substitutions is not None: __body["index_template_substitutions"] = index_template_substitutions if mapping_addition is not None: __body["mapping_addition"] = mapping_addition if pipeline_substitutions is not None: __body["pipeline_substitutions"] = pipeline_substitutions __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="simulate.ingest", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/slm.py000066400000000000000000000572731506101734100242370ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SlmClient(NamespacedClient): @_rewrite_parameters() def delete_lifecycle( self, *, policy_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a policy. Delete a snapshot lifecycle policy definition. This operation prevents any future snapshots from being taken but does not cancel in-progress snapshots or remove previously-taken snapshots.

``_ :param policy_id: The id of the snapshot lifecycle policy to remove :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") __path_parts: t.Dict[str, str] = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="slm.delete_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def execute_lifecycle( self, *, policy_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a policy. Immediately create a snapshot according to the snapshot lifecycle policy without waiting for the scheduled time. The snapshot policy is normally applied according to its schedule, but you might want to manually run a policy before performing an upgrade or other maintenance.
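
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, the policy ID is hypothetical, and `snapshot_name` is an assumed response field):

    resp = client.slm.execute_lifecycle(policy_id="daily-snapshots")
    print(resp["snapshot_name"])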

``_ :param policy_id: The id of the snapshot lifecycle policy to be executed :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") __path_parts: t.Dict[str, str] = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}/_execute' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="slm.execute_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def execute_retention( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a retention policy. Manually apply the retention policy to force immediate removal of snapshots that are expired according to the snapshot lifecycle policy retention rules. The retention policy is normally applied according to its schedule.

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/_execute_retention" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="slm.execute_retention", path_parts=__path_parts, ) @_rewrite_parameters() def get_lifecycle( self, *, policy_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get policy information. Get snapshot lifecycle policy definitions and information about the latest snapshot attempts.
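
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, the policy ID is hypothetical, and the response is assumed to be keyed by policy ID):

    resp = client.slm.get_lifecycle(policy_id="daily-snapshots")
    print(resp["daily-snapshots"]["policy"])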

``_ :param policy_id: Comma-separated list of snapshot lifecycle policies to retrieve :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if policy_id not in SKIP_IN_PATH: __path_parts = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}' else: __path_parts = {} __path = "/_slm/policy" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="slm.get_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def get_stats( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get snapshot lifecycle management statistics. Get global and policy-level statistics about actions taken by snapshot lifecycle management.

``_ :param master_timeout: Period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stats" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="slm.get_stats", path_parts=__path_parts, ) @_rewrite_parameters() def get_status( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the snapshot lifecycle management status.

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="slm.get_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("config", "name", "repository", "retention", "schedule"), ) def put_lifecycle( self, *, policy_id: str, config: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, name: t.Optional[str] = None, pretty: t.Optional[bool] = None, repository: t.Optional[str] = None, retention: t.Optional[t.Mapping[str, t.Any]] = None, schedule: t.Optional[str] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a policy. Create or update a snapshot lifecycle policy. If the policy already exists, this request increments the policy version. Only the latest version of a policy is stored.
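
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance and all names are hypothetical):

    client.slm.put_lifecycle(
        policy_id="daily-snapshots",
        schedule="0 30 1 * * ?",      # cron schedule: 1:30 a.m. daily
        name="<daily-snap-{now/d}>",  # snapshot names support date math
        repository="my_repository",   # must exist before the policy is created
        config={"indices": ["data-*"]},
        retention={"expire_after": "30d", "min_count": 5, "max_count": 50},
    )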

``_ :param policy_id: The identifier for the snapshot lifecycle policy you want to create or update. :param config: Configuration for each snapshot created by the policy. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param name: Name automatically assigned to each snapshot created by the policy. Date math is supported. To prevent conflicting snapshot names, a UUID is automatically appended to each snapshot name. :param repository: Repository used to store snapshots created by this policy. This repository must exist prior to the policy’s creation. You can create a repository using the snapshot repository API. :param retention: Retention rules used to retain and delete snapshots created by the policy. :param schedule: Periodic or absolute schedule at which the policy creates snapshots. SLM applies schedule changes immediately. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ if policy_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'policy_id'") __path_parts: t.Dict[str, str] = {"policy_id": _quote(policy_id)} __path = f'/_slm/policy/{__path_parts["policy_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if config is not None: __body["config"] = config if name is not None: __body["name"] = name if repository is not None: __body["repository"] = repository if retention is not None: __body["retention"] = retention if schedule is not None: __body["schedule"] = schedule if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="slm.put_lifecycle", path_parts=__path_parts, ) @_rewrite_parameters() def start( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start snapshot lifecycle management. Snapshot lifecycle management (SLM) starts automatically when a cluster is formed. Manually starting SLM is necessary only if it has been stopped using the stop SLM API.

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/start" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="slm.start", path_parts=__path_parts, ) @_rewrite_parameters() def stop( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop snapshot lifecycle management. Stop all snapshot lifecycle management (SLM) operations and the SLM plugin. This API is useful when you are performing maintenance on a cluster and need to prevent SLM from performing any actions on your data streams or indices. Stopping SLM does not stop any snapshots that are in progress. You can manually trigger snapshots with the run snapshot lifecycle policy API even if SLM is stopped.

The API returns a response as soon as the request is acknowledged, but the plugin might continue to run until in-progress operations complete and it can be safely stopped. Use the get snapshot lifecycle management status API to see if SLM is running.

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_slm/stop" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="slm.stop", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/snapshot.py000066400000000000000000002325471506101734100253020ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class SnapshotClient(NamespacedClient): @_rewrite_parameters() def cleanup_repository( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clean up the snapshot repository. Trigger the review of the contents of a snapshot repository and delete any stale data not referenced by existing snapshots.
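
A usage sketch (illustrative; `client` is an assumed `Elasticsearch` instance, the repository name is hypothetical, and `results` is an assumed response field):

    resp = client.snapshot.cleanup_repository(name="my_repository")
    print(resp["results"])  # e.g. counts of deleted blobs and bytes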

``_ :param name: The name of the snapshot repository to clean up. :param master_timeout: The period to wait for a connection to the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1` :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_cleanup' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.cleanup_repository", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("indices",), ) def clone( self, *, repository: str, snapshot: str, target_snapshot: str, indices: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clone a snapshot. Clone part or all of a snapshot into another snapshot in the same repository.

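An illustrative sketch of cloning two indices out of an existing snapshot; the repository and snapshot names are assumptions:

    client.snapshot.clone(
        repository="my_repo",
        snapshot="snap_1",
        target_snapshot="snap_1_partial",
        indices="index_a,index_b",  # comma-separated, multi-target syntax
    )
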
``_ :param repository: The name of the snapshot repository that both source and target snapshot belong to. :param snapshot: The source snapshot name. :param target_snapshot: The target snapshot name. :param indices: A comma-separated list of indices to include in the snapshot. Multi-target syntax is supported. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") if target_snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'target_snapshot'") if indices is None and body is None: raise ValueError("Empty value passed for parameter 'indices'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), "target_snapshot": _quote(target_snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_clone/{__path_parts["target_snapshot"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if not __body: if indices is not None: __body["indices"] = indices __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.clone", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "expand_wildcards", "feature_states", "ignore_unavailable", "include_global_state", "indices", "metadata", "partial", ), ) def create( self, *, repository: str, snapshot: str, error_trace: t.Optional[bool] = None, expand_wildcards: t.Optional[ t.Union[ t.Sequence[ t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]] ], t.Union[str, t.Literal["all", "closed", "hidden", "none", "open"]], ] ] = None, feature_states: t.Optional[t.Sequence[str]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_global_state: t.Optional[bool] = None, indices: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, partial: t.Optional[bool] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a snapshot. Take a snapshot of a cluster or of data streams and indices.

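A hedged example of taking a snapshot and blocking until it finishes; all names are placeholders:

    resp = client.snapshot.create(
        repository="my_repo",
        snapshot="snap_1",
        indices="my-index-*",       # multi-target syntax
        include_global_state=False,
        wait_for_completion=True,   # return only once the snapshot completes
    )
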
``_ :param repository: The name of the repository for the snapshot. :param snapshot: The name of the snapshot. It supports date math. It must be unique in the repository. :param expand_wildcards: Determines how wildcard patterns in the `indices` parameter match data streams and indices. It supports comma-separated values such as `open,hidden`. :param feature_states: The feature states to include in the snapshot. Each feature state includes one or more system indices containing related data. You can view a list of eligible features using the get features API. If `include_global_state` is `true`, all current feature states are included by default. If `include_global_state` is `false`, no feature states are included by default. Note that specifying an empty array will result in the default behavior. To exclude all feature states, regardless of the `include_global_state` value, specify an array with only the value `none` (`["none"]`). :param ignore_unavailable: If `true`, the request ignores data streams and indices in `indices` that are missing or closed. If `false`, the request returns an error for any data stream or index that is missing or closed. :param include_global_state: If `true`, the current cluster state is included in the snapshot. The cluster state includes persistent cluster settings, composable index templates, legacy index templates, ingest pipelines, and ILM policies. It also includes data stored in system indices, such as Watches and task records (configurable via `feature_states`). :param indices: A comma-separated list of data streams and indices to include in the snapshot. It supports a multi-target syntax. The default is an empty array (`[]`), which includes all regular data streams and regular indices. To exclude all data streams and indices, use `-*`. You can't use this parameter to include or exclude system indices or system data streams from a snapshot. Use `feature_states` instead. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param metadata: Arbitrary metadata attached to the snapshot, such as a record of who took the snapshot, why it was taken, or any other useful data. It can have any contents but it must be less than 1024 bytes. This information is not automatically generated by Elasticsearch. :param partial: If `true`, it enables you to restore a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. :param wait_for_completion: If `true`, the request returns a response when the snapshot is complete. If `false`, the request returns a response when the snapshot initializes. 
""" if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if expand_wildcards is not None: __body["expand_wildcards"] = expand_wildcards if feature_states is not None: __body["feature_states"] = feature_states if ignore_unavailable is not None: __body["ignore_unavailable"] = ignore_unavailable if include_global_state is not None: __body["include_global_state"] = include_global_state if indices is not None: __body["indices"] = indices if metadata is not None: __body["metadata"] = metadata if partial is not None: __body["partial"] = partial if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.create", path_parts=__path_parts, ) @_rewrite_parameters( body_name="repository", ) def create_repository( self, *, name: str, repository: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, verify: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a snapshot repository. IMPORTANT: If you are migrating searchable snapshots, the repository name must be identical in the source and destination clusters. To register a snapshot repository, the cluster's global metadata must be writable. Ensure there are no cluster blocks (for example, the cluster.blocks.read_only and cluster.blocks.read_only_allow_delete settings) that prevent write access.

Several options for this API can be specified using a query parameter or a request body parameter. If both parameters are specified, only the query parameter is used.

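As a sketch, registering a shared-filesystem (`fs`) repository; the mount path is an assumption and, for `fs` repositories, must be listed in the `path.repo` setting on every node:

    client.snapshot.create_repository(
        name="my_repo",
        repository={
            "type": "fs",
            "settings": {"location": "/mnt/backups/my_repo"},
        },
        verify=True,  # verify the repository on all master and data nodes
    )
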
``_ :param name: The name of the snapshot repository to register or update. :param repository: :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. :param verify: If `true`, the request verifies the repository is functional on all master and data nodes in the cluster. If `false`, this verification is skipped. You can also perform this verification with the verify snapshot repository API. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") if repository is None and body is None: raise ValueError( "Empty value passed for parameters 'repository' and 'body', one of them should be set." ) elif repository is not None and body is not None: raise ValueError("Cannot set both 'repository' and 'body'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if verify is not None: __query["verify"] = verify __body = repository if repository is not None else body __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.create_repository", path_parts=__path_parts, ) @_rewrite_parameters() def delete( self, *, repository: str, snapshot: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete snapshots.

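For example (snapshot names are placeholders), deleting several snapshots in one request:

    client.snapshot.delete(
        repository="my_repo",
        snapshot="snap_1,snap_2",  # wildcards such as "snap_*" also work
        wait_for_completion=True,
    )
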
``_ :param repository: The name of the repository to delete a snapshot from. :param snapshot: A comma-separated list of snapshot names to delete. It also accepts wildcards (`*`). :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param wait_for_completion: If `true`, the request returns a response when the matching snapshots are all deleted. If `false`, the request returns a response as soon as the deletes are scheduled. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="snapshot.delete", path_parts=__path_parts, ) @_rewrite_parameters() def delete_repository( self, *, name: t.Union[str, t.Sequence[str]], error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete snapshot repositories. When a repository is unregistered, Elasticsearch removes only the reference to the location where the repository is storing the snapshots. The snapshots themselves are left untouched and in place.

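A one-line sketch with an assumed repository name; only the repository reference is removed, the snapshot data itself stays in place:

    client.snapshot.delete_repository(name="my_repo")
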
``_ :param name: The name of the snapshot repositories to unregister. Wildcard (`*`) patterns are supported. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="snapshot.delete_repository", path_parts=__path_parts, ) @_rewrite_parameters() def get( self, *, repository: str, snapshot: t.Union[str, t.Sequence[str]], after: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_sort_value: t.Optional[str] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, include_repository: t.Optional[bool] = None, index_details: t.Optional[bool] = None, index_names: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, offset: t.Optional[int] = None, order: t.Optional[t.Union[str, t.Literal["asc", "desc"]]] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, slm_policy_filter: t.Optional[str] = None, sort: t.Optional[ t.Union[ str, t.Literal[ "duration", "failed_shard_count", "index_count", "name", "repository", "shard_count", "start_time", ], ] ] = None, state: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "FAILED", "INCOMPATIBLE", "IN_PROGRESS", "PARTIAL", "SUCCESS", ], ] ], t.Union[ str, t.Literal[ "FAILED", "INCOMPATIBLE", "IN_PROGRESS", "PARTIAL", "SUCCESS" ], ], ] ] = None, verbose: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get snapshot information.

NOTE: The after parameter and next field enable you to iterate through snapshots with some consistency guarantees regarding concurrent creation or deletion of snapshots. It is guaranteed that any snapshot that exists at the beginning of the iteration and is not concurrently deleted will be seen during the iteration. Snapshots concurrently created may be seen during an iteration.

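A sketch of paginating through snapshots with the `after` cursor described above; the repository name is an assumption and the response fields follow the shape documented for this API:

    resp = client.snapshot.get(
        repository="my_repo", snapshot="*", size=10, sort="start_time"
    )
    while True:
        for snap in resp["snapshots"]:
            print(snap["snapshot"])
        if "next" not in resp:
            break  # no more pages
        resp = client.snapshot.get(
            repository="my_repo", snapshot="*", size=10,
            sort="start_time", after=resp["next"],
        )
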
``_ :param repository: A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported. :param snapshot: A comma-separated list of snapshot names to retrieve. Wildcards (`*`) are supported. * To get information about all snapshots in a registered repository, use a wildcard (`*`) or `_all`. * To get information about any snapshots that are currently running, use `_current`. :param after: An offset identifier to start pagination from as returned by the next field in the response body. :param from_sort_value: The value of the current sort column at which to start retrieval. It can be a string `snapshot-` or a repository name when sorting by snapshot or repository name. It can be a millisecond time value or a number when sorting by `index-` or shard count. :param ignore_unavailable: If `false`, the request returns an error for any snapshots that are unavailable. :param include_repository: If `true`, the response includes the repository name in each snapshot. :param index_details: If `true`, the response includes additional information about each index in the snapshot comprising the number of shards in the index, the total size of the index in bytes, and the maximum number of segments per shard in the index. The default is `false`, meaning that this information is omitted. :param index_names: If `true`, the response includes the name of each index in each snapshot. :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param offset: Numeric offset to start pagination from based on the snapshots matching this request. Using a non-zero value for this parameter is mutually exclusive with using the after parameter. Defaults to 0. :param order: The sort order. Valid values are `asc` for ascending and `desc` for descending order. The default behavior is ascending order. :param size: The maximum number of snapshots to return. The default is 0, which means to return all that match the request without limit. :param slm_policy_filter: Filter snapshots by a comma-separated list of snapshot lifecycle management (SLM) policy names that snapshots belong to. You can use wildcards (`*`) and combinations of wildcards followed by exclude patterns starting with `-`. For example, the pattern `*,-policy-a-\\*` will return all snapshots except for those that were created by an SLM policy with a name starting with `policy-a-`. Note that the wildcard pattern `*` matches all snapshots created by an SLM policy but not those snapshots that were not created by an SLM policy. To include snapshots that were not created by an SLM policy, you can use the special pattern `_none` that will match all snapshots without an SLM policy. :param sort: The sort order for the result. The default behavior is sorting by snapshot start time stamp. :param state: Only return snapshots with a state found in the given comma-separated list of snapshot states. The default is all snapshot states. :param verbose: If `true`, returns additional information about each snapshot such as the version of Elasticsearch which took the snapshot, the start and end times of the snapshot, and the number of shards snapshotted. NOTE: The parameters `size`, `order`, `after`, `from_sort_value`, `offset`, `slm_policy_filter`, and `sort` are not supported when you set `verbose=false` and the sort order for requests with `verbose=false` is undefined. 
""" if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}' __query: t.Dict[str, t.Any] = {} if after is not None: __query["after"] = after if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_sort_value is not None: __query["from_sort_value"] = from_sort_value if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if include_repository is not None: __query["include_repository"] = include_repository if index_details is not None: __query["index_details"] = index_details if index_names is not None: __query["index_names"] = index_names if master_timeout is not None: __query["master_timeout"] = master_timeout if offset is not None: __query["offset"] = offset if order is not None: __query["order"] = order if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if slm_policy_filter is not None: __query["slm_policy_filter"] = slm_policy_filter if sort is not None: __query["sort"] = sort if state is not None: __query["state"] = state if verbose is not None: __query["verbose"] = verbose __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="snapshot.get", path_parts=__path_parts, ) @_rewrite_parameters() def get_repository( self, *, name: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, local: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get snapshot repository information.

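For instance, listing every registered repository and its type (an illustrative sketch; the response maps repository names to their configuration):

    repos = client.snapshot.get_repository()  # omit `name` to get all
    for repo_name, config in repos.items():
        print(repo_name, config["type"])
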
``_ :param name: A comma-separated list of snapshot repository names used to limit the request. Wildcard (`*`) expressions are supported including combining wildcards with exclude patterns starting with `-`. To get information about all snapshot repositories registered in the cluster, omit this parameter or use `*` or `_all`. :param local: If `true`, the request gets information from the local node only. If `false`, the request gets information from the master node. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] if name not in SKIP_IN_PATH: __path_parts = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}' else: __path_parts = {} __path = "/_snapshot" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if local is not None: __query["local"] = local if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="snapshot.get_repository", path_parts=__path_parts, ) @_rewrite_parameters() def repository_analyze( self, *, name: str, blob_count: t.Optional[int] = None, concurrency: t.Optional[int] = None, detailed: t.Optional[bool] = None, early_read_node_count: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, max_blob_size: t.Optional[t.Union[int, str]] = None, max_total_data_size: t.Optional[t.Union[int, str]] = None, pretty: t.Optional[bool] = None, rare_action_probability: t.Optional[float] = None, rarely_abort_writes: t.Optional[bool] = None, read_node_count: t.Optional[int] = None, register_operation_count: t.Optional[int] = None, seed: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Analyze a snapshot repository.

Performs operations on a snapshot repository in order to check for incorrect behaviour.

There are a large number of third-party storage systems available, not all of which are suitable for use as a snapshot repository by Elasticsearch. Some storage systems behave incorrectly, or perform poorly, especially when accessed concurrently by multiple clients as the nodes of an Elasticsearch cluster do. This API performs a collection of read and write operations on your repository which are designed to detect incorrect behaviour and to measure the performance characteristics of your storage system.

The default values for the parameters are deliberately low to reduce the impact of running an analysis inadvertently and to provide a sensible starting point for your investigations. Run your first analysis with the default parameter values to check for simple problems. Some repositories may behave correctly when lightly loaded but incorrectly under production-like workloads. If the first analysis is successful, run a sequence of increasingly large analyses until you encounter a failure or you reach a blob_count of at least 2000, a max_blob_size of at least 2gb, a max_total_data_size of at least 1tb, and a register_operation_count of at least 100. Always specify a generous timeout, possibly 1h or longer, to allow time for each analysis to run to completion. Some repositories may behave correctly when accessed by a small number of Elasticsearch nodes but incorrectly when accessed concurrently by a production-scale cluster. Perform the analyses using a multi-node cluster of a similar size to your production cluster so that it can detect any problems that only arise when the repository is accessed by many nodes at once.

If the analysis fails, Elasticsearch detected that your repository behaved unexpectedly. This usually means you are using a third-party storage system with an incorrect or incompatible implementation of the API it claims to support. If so, this storage system is not suitable for use as a snapshot repository. Repository analysis triggers conditions that occur only rarely when taking snapshots in a production system. Snapshotting to unsuitable storage may appear to work correctly most of the time despite repository analysis failures. However your snapshot data is at risk if you store it in a snapshot repository that does not reliably pass repository analysis. You can demonstrate that the analysis failure is due to an incompatible storage implementation by verifying that Elasticsearch does not detect the same problem when analysing the reference implementation of the storage protocol you are using. For instance, if you are using storage that offers an API which the supplier claims to be compatible with AWS S3, verify that repositories in AWS S3 do not fail repository analysis. This allows you to demonstrate to your storage supplier that a repository analysis failure must only be caused by an incompatibility with AWS S3 and cannot be attributed to a problem in Elasticsearch. Please do not report Elasticsearch issues involving third-party storage systems unless you can demonstrate that the same issue exists when analysing a repository that uses the reference implementation of the same storage protocol. You will need to work with the supplier of your storage system to address the incompatibilities that Elasticsearch detects.

If the analysis is successful, the API returns details of the testing process, optionally including how long each operation took. You can use this information to determine the performance of your storage system. If any operation fails or returns an incorrect result, the API returns an error. If the API returns an error, it may not have removed all the data it wrote to the repository. The error will indicate the location of any leftover data and this path is also recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it.

If the connection from your client to Elasticsearch is closed while the client is waiting for the result of the analysis, the test is cancelled. Some clients are configured to close their connection if no response is received within a certain timeout. An analysis takes a long time to complete so you might need to relax any such client-side timeouts. On cancellation the analysis attempts to clean up the data it was writing, but it may not be able to remove it all. The path to the leftover data is recorded in the Elasticsearch logs. You should verify that this location has been cleaned up correctly. If there is still leftover data at the specified location, you should manually remove it.

If the analysis is successful then it detected no incorrect behaviour, but this does not mean that correct behaviour is guaranteed. The analysis attempts to detect common bugs but it does not offer 100% coverage. Additionally, it does not test the following:

  • Your repository must perform durable writes. Once a blob has been written it must remain in place until it is deleted, even after a power loss or similar disaster.
  • Your repository must not suffer from silent data corruption. Once a blob has been written, its contents must remain unchanged until it is deliberately modified or deleted.
  • Your repository must behave correctly even if connectivity from the cluster is disrupted. Reads and writes may fail in this case, but they must not return incorrect results.

IMPORTANT: An analysis writes a substantial amount of data to your repository and then reads it back again. This consumes bandwidth on the network between the cluster and the repository, and storage space and I/O bandwidth on the repository itself. You must ensure this load does not affect other users of these systems. Analyses respect the repository settings max_snapshot_bytes_per_sec and max_restore_bytes_per_sec if available and the cluster setting indices.recovery.max_bytes_per_sec which you can use to limit the bandwidth they consume.

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions. The response exposes implementation details of the analysis which may change from version to version.

NOTE: Different versions of Elasticsearch may perform different checks for repository compatibility, with newer versions typically being stricter than older ones. A storage system that passes repository analysis with one version of Elasticsearch may fail with a different version. This indicates it behaves incorrectly in ways that the former version did not detect. You must work with the supplier of your storage system to address the incompatibilities detected by the repository analysis API in any version of Elasticsearch.

NOTE: This API may not work correctly in a mixed-version cluster.

Implementation details

NOTE: This section of documentation describes how the repository analysis API works in this version of Elasticsearch, but you should expect the implementation to vary between versions. The request parameters and response format depend on details of the implementation so may also be different in newer versions.

The analysis comprises a number of blob-level tasks, as set by the blob_count parameter, and a number of compare-and-exchange operations on linearizable registers, as set by the register_operation_count parameter. These tasks are distributed over the data and master-eligible nodes in the cluster for execution.

For most blob-level tasks, the executing node first writes a blob to the repository and then instructs some of the other nodes in the cluster to attempt to read the data it just wrote. The size of the blob is chosen randomly, according to the max_blob_size and max_total_data_size parameters. If any of these reads fails then the repository does not implement the necessary read-after-write semantics that Elasticsearch requires.

For some blob-level tasks, the executing node will instruct some of its peers to attempt to read the data before the writing process completes. These reads are permitted to fail, but must not return partial data. If any read returns partial data then the repository does not implement the necessary atomicity semantics that Elasticsearch requires.

For some blob-level tasks, the executing node will overwrite the blob while its peers are reading it. In this case the data read may come from either the original or the overwritten blob, but the read operation must not return partial data or a mix of data from the two blobs. If any of these reads returns partial data or a mix of the two blobs then the repository does not implement the necessary atomicity semantics that Elasticsearch requires for overwrites.

The executing node will use a variety of different methods to write the blob. For instance, where applicable, it will use both single-part and multi-part uploads. Similarly, the reading nodes will use a variety of different methods to read the data back again. For instance they may read the entire blob from start to end or may read only a subset of the data.

For some blob-level tasks, the executing node will cancel the write before it is complete. In this case, it still instructs some of the other nodes in the cluster to attempt to read the blob but all of these reads must fail to find the blob.

Linearizable registers are special blobs that Elasticsearch manipulates using an atomic compare-and-exchange operation. This operation ensures correct and strongly-consistent behavior even when the blob is accessed by multiple nodes at the same time. The detailed implementation of the compare-and-exchange operation on linearizable registers varies by repository type. Repository analysis verifies that uncontended compare-and-exchange operations on a linearizable register blob always succeed. Repository analysis also verifies that contended operations either succeed or report the contention but do not return incorrect results. If an operation fails due to contention, Elasticsearch retries the operation until it succeeds. Most of the compare-and-exchange operations performed by repository analysis atomically increment a counter which is represented as an 8-byte blob. Some operations also verify the behavior on small blobs with sizes other than 8 bytes.

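Following the advice above, a sketch of a first run with default sizing and a generous timeout, then a later production-scale run; the repository name is an assumption:

    # First analysis: default parameters, checking for simple problems.
    client.snapshot.repository_analyze(name="my_repo", timeout="1h")

    # Later, scale up along the lines recommended above.
    client.snapshot.repository_analyze(
        name="my_repo",
        blob_count=2000,
        max_blob_size="2gb",
        max_total_data_size="1tb",
        register_operation_count=100,
        timeout="4h",
    )
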
``_ :param name: The name of the repository. :param blob_count: The total number of blobs to write to the repository during the test. For realistic experiments, you should set it to at least `2000`. :param concurrency: The number of operations to run concurrently during the test. :param detailed: Indicates whether to return detailed results, including timing information for every operation performed during the analysis. If false, it returns only a summary of the analysis. :param early_read_node_count: The number of nodes on which to perform an early read operation while writing each blob. Early read operations are only rarely performed. :param max_blob_size: The maximum size of a blob to be written during the test. For realistic experiments, you should set it to at least `2gb`. :param max_total_data_size: An upper limit on the total size of all the blobs written during the test. For realistic experiments, you should set it to at least `1tb`. :param rare_action_probability: The probability of performing a rare action such as an early read, an overwrite, or an aborted write on each blob. :param rarely_abort_writes: Indicates whether to rarely cancel writes before they complete. :param read_node_count: The number of nodes on which to read a blob after writing. :param register_operation_count: The minimum number of linearizable register operations to perform in total. For realistic experiments, you should set it to at least `100`. :param seed: The seed for the pseudo-random number generator used to generate the list of operations performed during the test. To repeat the same set of operations in multiple experiments, use the same seed in each experiment. Note that the operations are performed concurrently so might not always happen in the same order on each run. :param timeout: The period of time to wait for the test to complete. If no response is received before the timeout expires, the test is cancelled and returns an error. 
""" if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_analyze' __query: t.Dict[str, t.Any] = {} if blob_count is not None: __query["blob_count"] = blob_count if concurrency is not None: __query["concurrency"] = concurrency if detailed is not None: __query["detailed"] = detailed if early_read_node_count is not None: __query["early_read_node_count"] = early_read_node_count if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if max_blob_size is not None: __query["max_blob_size"] = max_blob_size if max_total_data_size is not None: __query["max_total_data_size"] = max_total_data_size if pretty is not None: __query["pretty"] = pretty if rare_action_probability is not None: __query["rare_action_probability"] = rare_action_probability if rarely_abort_writes is not None: __query["rarely_abort_writes"] = rarely_abort_writes if read_node_count is not None: __query["read_node_count"] = read_node_count if register_operation_count is not None: __query["register_operation_count"] = register_operation_count if seed is not None: __query["seed"] = seed if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.repository_analyze", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def repository_verify_integrity( self, *, name: t.Union[str, t.Sequence[str]], blob_thread_pool_concurrency: t.Optional[int] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_snapshot_verification_concurrency: t.Optional[int] = None, index_verification_concurrency: t.Optional[int] = None, max_bytes_per_sec: t.Optional[str] = None, max_failed_shard_snapshots: t.Optional[int] = None, meta_thread_pool_concurrency: t.Optional[int] = None, pretty: t.Optional[bool] = None, snapshot_verification_concurrency: t.Optional[int] = None, verify_blob_contents: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Verify the repository integrity. Verify the integrity of the contents of a snapshot repository.

This API enables you to perform a comprehensive check of the contents of a repository, looking for any anomalies in its data or metadata which might prevent you from restoring snapshots from the repository or which might cause future snapshot create or delete operations to fail.

If you suspect the integrity of the contents of one of your snapshot repositories, cease all write activity to this repository immediately, set its read_only option to true, and use this API to verify its integrity. Until you do so:

  • It may not be possible to restore some snapshots from this repository.
  • Searchable snapshots may report errors when searched or may have unassigned shards.
  • Taking snapshots into this repository may fail or may appear to succeed but have created a snapshot which cannot be restored.
  • Deleting snapshots from this repository may fail or may appear to succeed but leave the underlying data on disk.
  • Continuing to write to the repository while it is in an invalid state may cause additional damage to its contents.

If the API finds any problems with the integrity of the contents of your repository, Elasticsearch will not be able to repair the damage. The only way to bring the repository back into a fully working state after its contents have been damaged is by restoring its contents from a repository backup which was taken before the damage occurred. You must also identify what caused the damage and take action to prevent it from happening again.

If you cannot restore a repository backup, register a new repository and use this for all future snapshot operations. In some cases it may be possible to recover some of the contents of a damaged repository, either by restoring as many of its snapshots as needed and taking new snapshots of the restored data, or by using the reindex API to copy data from any searchable snapshots mounted from the damaged repository.

Avoid all operations which write to the repository while the verify repository integrity API is running. If something changes the repository contents while an integrity verification is running then Elasticsearch may incorrectly report having detected some anomalies in its contents due to the concurrent writes. It may also incorrectly fail to report some anomalies that the concurrent writes prevented it from detecting.

NOTE: This API is intended for exploratory use by humans. You should expect the request parameters and the response format to vary in future versions.

NOTE: This API may not work correctly in a mixed-version cluster.

The default values for the parameters of this API are designed to limit the impact of the integrity verification on other activities in your cluster. For instance, by default it will only use at most half of the snapshot_meta threads to verify the integrity of each snapshot, allowing other snapshot operations to use the other half of this thread pool. If you modify these parameters to speed up the verification process, you risk disrupting other snapshot-related operations in your cluster. For large repositories, consider setting up a separate single-node Elasticsearch cluster just for running the integrity verification API.

The response exposes implementation details of the analysis which may change from version to version. The response body format is therefore not considered stable and may be different in newer versions.

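A cautious sketch for this experimental API; the repository name is an assumption, and all writes should be stopped and the repository's read_only option set to true first, as described above:

    resp = client.snapshot.repository_verify_integrity(name="my_repo")
    # Passing verify_blob_contents=True additionally checksums every blob,
    # which reads the entire repository and may be extremely slow.
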
``_ :param name: The name of the snapshot repository. :param blob_thread_pool_concurrency: If `verify_blob_contents` is `true`, this parameter specifies how many blobs to verify at once. :param index_snapshot_verification_concurrency: The maximum number of index snapshots to verify concurrently within each index verification. :param index_verification_concurrency: The number of indices to verify concurrently. The default behavior is to use the entire `snapshot_meta` thread pool. :param max_bytes_per_sec: If `verify_blob_contents` is `true`, this parameter specifies the maximum amount of data that Elasticsearch will read from the repository every second. :param max_failed_shard_snapshots: The number of shard snapshot failures to track during integrity verification, in order to avoid excessive resource usage. If your repository contains more than this number of shard snapshot failures, the verification will fail. :param meta_thread_pool_concurrency: The maximum number of snapshot metadata operations to run concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. :param snapshot_verification_concurrency: The number of snapshots to verify concurrently. The default behavior is to use at most half of the `snapshot_meta` thread pool at once. :param verify_blob_contents: Indicates whether to verify the checksum of every data blob in the repository. If this feature is enabled, Elasticsearch will read the entire repository contents, which may be extremely slow and expensive. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_verify_integrity' __query: t.Dict[str, t.Any] = {} if blob_thread_pool_concurrency is not None: __query["blob_thread_pool_concurrency"] = blob_thread_pool_concurrency if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if index_snapshot_verification_concurrency is not None: __query["index_snapshot_verification_concurrency"] = ( index_snapshot_verification_concurrency ) if index_verification_concurrency is not None: __query["index_verification_concurrency"] = index_verification_concurrency if max_bytes_per_sec is not None: __query["max_bytes_per_sec"] = max_bytes_per_sec if max_failed_shard_snapshots is not None: __query["max_failed_shard_snapshots"] = max_failed_shard_snapshots if meta_thread_pool_concurrency is not None: __query["meta_thread_pool_concurrency"] = meta_thread_pool_concurrency if pretty is not None: __query["pretty"] = pretty if snapshot_verification_concurrency is not None: __query["snapshot_verification_concurrency"] = ( snapshot_verification_concurrency ) if verify_blob_contents is not None: __query["verify_blob_contents"] = verify_blob_contents __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.repository_verify_integrity", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "feature_states", "ignore_index_settings", "ignore_unavailable", "include_aliases", "include_global_state", "index_settings", "indices", "partial", "rename_pattern", "rename_replacement", ), ) def restore( self, *, repository: str, snapshot: str, error_trace: t.Optional[bool] = None, feature_states: t.Optional[t.Sequence[str]] = None, 
filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_index_settings: t.Optional[t.Sequence[str]] = None, ignore_unavailable: t.Optional[bool] = None, include_aliases: t.Optional[bool] = None, include_global_state: t.Optional[bool] = None, index_settings: t.Optional[t.Mapping[str, t.Any]] = None, indices: t.Optional[t.Union[str, t.Sequence[str]]] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, partial: t.Optional[bool] = None, pretty: t.Optional[bool] = None, rename_pattern: t.Optional[str] = None, rename_replacement: t.Optional[str] = None, wait_for_completion: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Restore a snapshot. Restore a snapshot of a cluster or data streams and indices.

You can restore a snapshot only to a running cluster with an elected master node. The snapshot repository must be registered and available to the cluster. The snapshot and cluster versions must be compatible.

To restore a snapshot, the cluster's global metadata must be writable. Ensure there aren't any cluster blocks that prevent writes. The restore operation ignores index blocks.

Before you restore a data stream, ensure the cluster contains a matching index template with data streams enabled. To check, use the index management feature in Kibana or the get index template API:

GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream

If no such template exists, you can create one or restore a cluster state that contains one. Without a matching index template, a data stream can't roll over or create backing indices.

If your snapshot contains data from App Search or Workplace Search, you must restore the Enterprise Search encryption key before you restore the snapshot.

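A sketch of restoring one index under a new name so it does not collide with a live index; the names and pattern are assumptions, and the `$1` back-reference follows the `appendReplacement` logic noted in the `rename_pattern` parameter below:

    client.snapshot.restore(
        repository="my_repo",
        snapshot="snap_1",
        indices="index_a",
        rename_pattern="index_(.+)",
        rename_replacement="restored_index_$1",
        wait_for_completion=True,
    )
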
``_ :param repository: The name of the repository to restore a snapshot from. :param snapshot: The name of the snapshot to restore. :param feature_states: The feature states to restore. If `include_global_state` is `true`, the request restores all feature states in the snapshot by default. If `include_global_state` is `false`, the request restores no feature states by default. Note that specifying an empty array will result in the default behavior. To restore no feature states, regardless of the `include_global_state` value, specify an array containing only the value `none` (`["none"]`). :param ignore_index_settings: The index settings to not restore from the snapshot. You can't use this option to ignore `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. :param ignore_unavailable: If `true`, the request ignores any index or data stream in indices that's missing from the snapshot. If `false`, the request returns an error for any missing index or data stream. :param include_aliases: If `true`, the request restores aliases for any restored data streams and indices. If `false`, the request doesn’t restore aliases. :param include_global_state: If `true`, restore the cluster state. The cluster state includes: * Persistent cluster settings * Index templates * Legacy index templates * Ingest pipelines * Index lifecycle management (ILM) policies * Stored scripts * For snapshots taken after 7.12.0, feature states If `include_global_state` is `true`, the restore operation merges the legacy index templates in your cluster with the templates contained in the snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines, and ILM lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. Use the `feature_states` parameter to configure how feature states are restored. If `include_global_state` is `true` and a snapshot was created without a global state then the restore request will fail. :param index_settings: Index settings to add or change in restored indices, including backing indices. You can't use this option to change `index.number_of_shards`. For data streams, this option applies only to restored backing indices. New backing indices are configured using the data stream's matching index template. :param indices: A comma-separated list of indices and data streams to restore. It supports a multi-target syntax. The default behavior is all regular indices and regular data streams in the snapshot. You can't use this parameter to restore system indices or system data streams. Use `feature_states` instead. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param partial: If `false`, the entire restore operation will fail if one or more indices included in the snapshot do not have all primary shards available. If true, it allows restoring a partial snapshot of indices with unavailable shards. Only shards that were successfully included in the snapshot will be restored. All missing shards will be recreated as empty. :param rename_pattern: A rename pattern to apply to restored data streams and indices. 
Data streams and indices matching the rename pattern will be renamed according to `rename_replacement`. The rename pattern is applied as defined by the regular expression that supports referencing the original text, according to the `appendReplacement` logic. :param rename_replacement: The rename replacement string that is used with the `rename_pattern`. :param wait_for_completion: If `true`, the request returns a response when the restore operation completes. The operation is complete when it finishes all attempts to recover primary shards for restored indices. This applies even if one or more of the recovery attempts fail. If `false`, the request returns a response when the restore operation initializes. """ if repository in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'repository'") if snapshot in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'snapshot'") __path_parts: t.Dict[str, str] = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_restore' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion if not __body: if feature_states is not None: __body["feature_states"] = feature_states if ignore_index_settings is not None: __body["ignore_index_settings"] = ignore_index_settings if ignore_unavailable is not None: __body["ignore_unavailable"] = ignore_unavailable if include_aliases is not None: __body["include_aliases"] = include_aliases if include_global_state is not None: __body["include_global_state"] = include_global_state if index_settings is not None: __body["index_settings"] = index_settings if indices is not None: __body["indices"] = indices if partial is not None: __body["partial"] = partial if rename_pattern is not None: __body["rename_pattern"] = rename_pattern if rename_replacement is not None: __body["rename_replacement"] = rename_replacement if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="snapshot.restore", path_parts=__path_parts, ) @_rewrite_parameters() def status( self, *, repository: t.Optional[str] = None, snapshot: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_unavailable: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the snapshot status. Get a detailed description of the current state for each shard participating in the snapshot.

Note that this API should be used only to obtain detailed shard-level information for ongoing snapshots. If this detail is not needed or you want to obtain information about one or more existing snapshots, use the get snapshot API.

If you omit the <snapshot> request path parameter, the request retrieves information only for currently running snapshots. This usage is preferred. If needed, you can specify <repository> and <snapshot> to retrieve information for specific snapshots, even if they're not currently running.

WARNING: Using the API to return the status of any snapshots other than currently running snapshots can be expensive. The API requires a read from the repository for each shard in each snapshot. For example, if you have 100 snapshots with 1,000 shards each, an API request that includes all snapshots will require 100,000 reads (100 snapshots x 1,000 shards).

Depending on the latency of your storage, such requests can take an extremely long time to return results. These requests can also tax machine resources and, when using cloud storage, incur high processing costs.

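In line with the warning above, a sketch that asks only about currently running snapshots, which is the preferred and cheap usage:

    resp = client.snapshot.status()  # no repository or snapshot: running only
    # Naming a repository and snapshot instead triggers one repository read
    # per shard, e.g. client.snapshot.status(repository="my_repo",
    # snapshot="snap_1"), so use that form sparingly.
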
``_ :param repository: The snapshot repository name used to limit the request. It supports wildcards (`*`) if `<snapshot>` isn't specified. :param snapshot: A comma-separated list of snapshots to retrieve status for. The default is currently running snapshots. Wildcards (`*`) are not supported. :param ignore_unavailable: If `false`, the request returns an error for any snapshots that are unavailable. If `true`, the request ignores snapshots that are unavailable, such as those that are corrupted or temporarily cannot be returned. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] if repository not in SKIP_IN_PATH and snapshot not in SKIP_IN_PATH: __path_parts = { "repository": _quote(repository), "snapshot": _quote(snapshot), } __path = f'/_snapshot/{__path_parts["repository"]}/{__path_parts["snapshot"]}/_status' elif repository not in SKIP_IN_PATH: __path_parts = {"repository": _quote(repository)} __path = f'/_snapshot/{__path_parts["repository"]}/_status' else: __path_parts = {} __path = "/_snapshot/_status" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if ignore_unavailable is not None: __query["ignore_unavailable"] = ignore_unavailable if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="snapshot.status", path_parts=__path_parts, ) @_rewrite_parameters() def verify_repository( self, *, name: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Verify a snapshot repository. Check for common misconfigurations in a snapshot repository.

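A quick sketch (assuming a configured client named `client`; the repository name is hypothetical):

    resp = client.snapshot.verify_repository(name="my_repository")
    print(resp["nodes"])  # nodes that verified the repository successfully
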
``_ :param name: The name of the snapshot repository to verify. :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. :param timeout: The period to wait for a response from all relevant nodes in the cluster after updating the cluster metadata. If no response is received before the timeout expires, the cluster metadata update still applies but the response will indicate that it was not completely acknowledged. To indicate that the request should never timeout, set it to `-1`. """ if name in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'name'") __path_parts: t.Dict[str, str] = {"repository": _quote(name)} __path = f'/_snapshot/{__path_parts["repository"]}/_verify' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="snapshot.verify_repository", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/sql.py000066400000000000000000000471041506101734100242330ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SqlClient(NamespacedClient): @_rewrite_parameters( body_fields=("cursor",), ) def clear_cursor( self, *, cursor: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Clear an SQL search cursor.

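A sketch of the cursor lifecycle (assuming a configured client named `client`; the index name is hypothetical):

    page = client.sql.query(query="SELECT * FROM my_index", fetch_size=5)
    if "cursor" in page:  # present when more pages remain
        client.sql.clear_cursor(cursor=page["cursor"])  # release server-side resources early
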
``_ :param cursor: Cursor to clear. """ if cursor is None and body is None: raise ValueError("Empty value passed for parameter 'cursor'") __path_parts: t.Dict[str, str] = {} __path = "/_sql/close" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if cursor is not None: __body["cursor"] = cursor __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="sql.clear_cursor", path_parts=__path_parts, ) @_rewrite_parameters() def delete_async( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete an async SQL search. Delete an async SQL search or a stored synchronous SQL search. If the search is still running, the API cancels it.

If the Elasticsearch security features are enabled, only the following users can use this API to delete a search:

  • Users with the cancel_task cluster privilege.
  • The user who first submitted the search.
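For example (assuming a configured client named `client`; the search identifier is a hypothetical placeholder):

    client.sql.delete_async(id="my-async-search-id")
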
``_ :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_sql/async/delete/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="sql.delete_async", path_parts=__path_parts, ) @_rewrite_parameters() def get_async( self, *, id: str, delimiter: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[str] = None, human: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get async SQL search results. Get the current status and available results for an async SQL search or stored synchronous SQL search.

If the Elasticsearch security features are enabled, only the user who first submitted the SQL search can retrieve the search using this API.

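A minimal polling sketch (assuming a configured client named `client`; the search identifier is a hypothetical placeholder, and the field names follow the SQL search response format):

    resp = client.sql.get_async(id="my-async-search-id", wait_for_completion_timeout="2s")
    if not resp["is_running"]:
        print(resp["rows"])
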
``_ :param id: The identifier for the search. :param delimiter: The separator for CSV results. The API supports this parameter only for CSV responses. :param format: The format for the response. You must specify a format using this parameter or the `Accept` HTTP header. If you specify both, the API uses this parameter. :param keep_alive: The retention period for the search and its results. It defaults to the `keep_alive` period for the original SQL search. :param wait_for_completion_timeout: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_sql/async/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if delimiter is not None: __query["delimiter"] = delimiter if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if keep_alive is not None: __query["keep_alive"] = keep_alive if pretty is not None: __query["pretty"] = pretty if wait_for_completion_timeout is not None: __query["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="sql.get_async", path_parts=__path_parts, ) @_rewrite_parameters() def get_async_status( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get the async SQL search status. Get the current status of an async SQL search or a stored synchronous SQL search.

``_ :param id: The identifier for the search. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_sql/async/status/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="sql.get_async_status", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "allow_partial_search_results", "catalog", "columnar", "cursor", "fetch_size", "field_multi_value_leniency", "filter", "index_using_frozen", "keep_alive", "keep_on_completion", "page_timeout", "params", "query", "request_timeout", "runtime_mappings", "time_zone", "wait_for_completion_timeout", ), ignore_deprecated_options={"params", "request_timeout"}, ) def query( self, *, allow_partial_search_results: t.Optional[bool] = None, catalog: t.Optional[str] = None, columnar: t.Optional[bool] = None, cursor: t.Optional[str] = None, error_trace: t.Optional[bool] = None, fetch_size: t.Optional[int] = None, field_multi_value_leniency: t.Optional[bool] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal["cbor", "csv", "json", "smile", "tsv", "txt", "yaml"] ] ] = None, human: t.Optional[bool] = None, index_using_frozen: t.Optional[bool] = None, keep_alive: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, keep_on_completion: t.Optional[bool] = None, page_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, params: t.Optional[t.Sequence[t.Any]] = None, pretty: t.Optional[bool] = None, query: t.Optional[str] = None, request_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, runtime_mappings: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, time_zone: t.Optional[str] = None, wait_for_completion_timeout: t.Optional[ t.Union[str, t.Literal[-1], t.Literal[0]] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get SQL search results. Run an SQL request.

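For example, a minimal synchronous query (assuming a configured client named `client`; the index name `library` is hypothetical):

    resp = client.sql.query(
        query="SELECT author, COUNT(*) FROM library GROUP BY author",
        fetch_size=100,
    )
    for row in resp["rows"]:
        print(row)
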
``_ :param allow_partial_search_results: If `true`, the response has partial results when there are shard request timeouts or shard failures. If `false`, the API returns an error with no partial results. :param catalog: The default catalog (cluster) for queries. If unspecified, the queries execute on the data in the local cluster only. :param columnar: If `true`, the results are in a columnar fashion: one row represents all the values of a certain column from the current page of results. The API supports this parameter only for CBOR, JSON, SMILE, and YAML responses. :param cursor: The cursor used to retrieve a set of paginated results. If you specify a cursor, the API only uses the `columnar` and `time_zone` request body parameters. It ignores other request body parameters. :param fetch_size: The maximum number of rows (or entries) to return in one response. :param field_multi_value_leniency: If `false`, the API returns an exception when encountering multiple values for a field. If `true`, the API is lenient and returns the first value from the array with no guarantee of consistent results. :param filter: The Elasticsearch query DSL for additional filtering. :param format: The format for the response. You can also specify a format using the `Accept` HTTP header. If you specify both this parameter and the `Accept` HTTP header, this parameter takes precedence. :param index_using_frozen: If `true`, the search can run on frozen indices. :param keep_alive: The retention period for an async or saved synchronous search. :param keep_on_completion: If `true`, Elasticsearch stores synchronous searches if you also specify the `wait_for_completion_timeout` parameter. If `false`, Elasticsearch only stores async searches that don't finish before the `wait_for_completion_timeout`. :param page_timeout: The minimum retention period for the scroll cursor. After this time period, a pagination request might fail because the scroll cursor is no longer available. Subsequent scroll requests prolong the lifetime of the scroll cursor by the duration of `page_timeout` in the scroll request. :param params: The values for parameters in the query. :param query: The SQL query to run. :param request_timeout: The timeout before the request fails. :param runtime_mappings: One or more runtime fields for the search request. These fields take precedence over mapped fields with the same name. :param time_zone: The ISO-8601 time zone ID for the search. :param wait_for_completion_timeout: The period to wait for complete results. It defaults to no timeout, meaning the request waits for complete search results. If the search doesn't finish within this period, the search becomes async. To save a synchronous search, you must specify this parameter and the `keep_on_completion` parameter. 
""" __path_parts: t.Dict[str, str] = {} __path = "/_sql" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if allow_partial_search_results is not None: __body["allow_partial_search_results"] = allow_partial_search_results if catalog is not None: __body["catalog"] = catalog if columnar is not None: __body["columnar"] = columnar if cursor is not None: __body["cursor"] = cursor if fetch_size is not None: __body["fetch_size"] = fetch_size if field_multi_value_leniency is not None: __body["field_multi_value_leniency"] = field_multi_value_leniency if filter is not None: __body["filter"] = filter if index_using_frozen is not None: __body["index_using_frozen"] = index_using_frozen if keep_alive is not None: __body["keep_alive"] = keep_alive if keep_on_completion is not None: __body["keep_on_completion"] = keep_on_completion if page_timeout is not None: __body["page_timeout"] = page_timeout if params is not None: __body["params"] = params if query is not None: __body["query"] = query if request_timeout is not None: __body["request_timeout"] = request_timeout if runtime_mappings is not None: __body["runtime_mappings"] = runtime_mappings if time_zone is not None: __body["time_zone"] = time_zone if wait_for_completion_timeout is not None: __body["wait_for_completion_timeout"] = wait_for_completion_timeout __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="sql.query", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("query", "fetch_size", "filter", "time_zone"), ) def translate( self, *, query: t.Optional[str] = None, error_trace: t.Optional[bool] = None, fetch_size: t.Optional[int] = None, filter: t.Optional[t.Mapping[str, t.Any]] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, time_zone: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Translate SQL into Elasticsearch queries. Translate an SQL search into a search API request containing Query DSL. It accepts the same request body parameters as the SQL search API, excluding cursor.

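For example (assuming a configured client named `client`; the index and field names are hypothetical):

    dsl = client.sql.translate(
        query="SELECT * FROM library ORDER BY page_count DESC", fetch_size=10
    )
    # `dsl` is the equivalent search API request body (Query DSL)
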
``_ :param query: The SQL query to run. :param fetch_size: The maximum number of rows (or entries) to return in one response. :param filter: The Elasticsearch query DSL for additional filtering. :param time_zone: The ISO-8601 time zone ID for the search. """ if query is None and body is None: raise ValueError("Empty value passed for parameter 'query'") __path_parts: t.Dict[str, str] = {} __path = "/_sql/translate" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if query is not None: __body["query"] = query if fetch_size is not None: __body["fetch_size"] = fetch_size if filter is not None: __body["filter"] = filter if time_zone is not None: __body["time_zone"] = time_zone __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="sql.translate", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/ssl.py000066400000000000000000000072641506101734100242400ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class SslClient(NamespacedClient): @_rewrite_parameters() def certificates( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get SSL certificates.

Get information about the X.509 certificates that are used to encrypt communications in the cluster. The API returns a list that includes certificates from all TLS contexts including:

  • Settings for transport and HTTP interfaces
  • TLS settings that are used within authentication realms
  • TLS settings for remote monitoring exporters

The list includes certificates that are used for configuring trust, such as those configured in the xpack.security.transport.ssl.truststore and xpack.security.transport.ssl.certificate_authorities settings. It also includes certificates that are used for configuring server identity, such as xpack.security.http.ssl.keystore and xpack.security.http.ssl.certificate settings.

The list does not include certificates that are sourced from the default SSL context of the Java Runtime Environment (JRE), even if those certificates are in use within Elasticsearch.

NOTE: When a PKCS#11 token is configured as the truststore of the JRE, the API returns all the certificates that are included in the PKCS#11 token irrespective of whether these are used in the Elasticsearch TLS configuration.

If Elasticsearch is configured to use a keystore or truststore, the API output includes all certificates in that store, even though some of the certificates might not be in active use within the cluster.

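A minimal sketch (assuming a configured client named `client`; the field names follow the documented response format):

    for cert in client.ssl.certificates():
        print(cert["path"], cert["expiry"])
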
``_ """ __path_parts: t.Dict[str, str] = {} __path = "/_ssl/certificates" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="ssl.certificates", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/synonyms.py000066400000000000000000000420741506101734100253340ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class SynonymsClient(NamespacedClient): @_rewrite_parameters() def delete_synonym( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a synonym set.

You can only delete a synonyms set that is not in use by any index analyzer.

Synonyms sets can be used in synonym graph token filters and synonym token filters. These synonym filters can be used as part of search analyzers.

Analyzers need to be loaded when an index is restored (such as when a node starts, or the index becomes open). Even if the analyzer is not used on any field mapping, it still needs to be loaded during the index recovery phase.

If any analyzers cannot be loaded, the index becomes unavailable and the cluster status becomes red or yellow as index shards are not available. To prevent that, synonyms sets that are used in analyzers can't be deleted. A delete request in this case will return a 400 response code.

To remove a synonyms set, you must first remove all indices that contain analyzers using it. You can migrate an index by creating a new index that does not contain the token filter with the synonyms set, and use the reindex API in order to copy over the index data. Once finished, you can delete the index. When the synonyms set is not used in analyzers, you will be able to delete it.

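For example (assuming a configured client named `client`; the set identifier is hypothetical):

    client.synonyms.delete_synonym(id="my-synonyms-set")  # returns a 400 if the set is still in use
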
``_ :param id: The synonyms set identifier to delete. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_synonyms/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="synonyms.delete_synonym", path_parts=__path_parts, ) @_rewrite_parameters() def delete_synonym_rule( self, *, set_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a synonym rule. Delete a synonym rule from a synonym set.

``_ :param set_id: The ID of the synonym set to update. :param rule_id: The ID of the synonym rule to delete. :param refresh: If `true`, the request will refresh the analyzers with the deleted synonym rule and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the deleted synonym rule """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "set_id": _quote(set_id), "rule_id": _quote(rule_id), } __path = f'/_synonyms/{__path_parts["set_id"]}/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="synonyms.delete_synonym_rule", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_synonym( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a synonym set.

``_ :param id: The synonyms set identifier to retrieve. :param from_: The starting offset for query rules to retrieve. :param size: The max number of query rules to retrieve. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_synonyms/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="synonyms.get_synonym", path_parts=__path_parts, ) @_rewrite_parameters() def get_synonym_rule( self, *, set_id: str, rule_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a synonym rule. Get a synonym rule from a synonym set.

``_ :param set_id: The ID of the synonym set to retrieve the synonym rule from. :param rule_id: The ID of the synonym rule to retrieve. """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") __path_parts: t.Dict[str, str] = { "set_id": _quote(set_id), "rule_id": _quote(rule_id), } __path = f'/_synonyms/{__path_parts["set_id"]}/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="synonyms.get_synonym_rule", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_synonyms_sets( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all synonym sets. Get a summary of all defined synonym sets.

``_ :param from_: The starting offset for synonyms sets to retrieve. :param size: The maximum number of synonyms sets to retrieve. """ __path_parts: t.Dict[str, str] = {} __path = "/_synonyms" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="synonyms.get_synonyms_sets", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("synonyms_set",), ) def put_synonym( self, *, id: str, synonyms_set: t.Optional[ t.Union[t.Mapping[str, t.Any], t.Sequence[t.Mapping[str, t.Any]]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a synonym set. Synonyms sets are limited to a maximum of 10,000 synonym rules per set. If you need to manage more synonym rules, you can create multiple synonym sets.

When an existing synonyms set is updated, the search analyzers that use the synonyms set are reloaded automatically for all indices. This is equivalent to invoking the reload search analyzers API for all indices that use the synonyms set.

For practical examples of how to create or update a synonyms set, refer to the External documentation.

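A minimal sketch (assuming a configured client named `client`; the set identifier and rules are hypothetical):

    client.synonyms.put_synonym(
        id="my-synonyms-set",
        synonyms_set=[
            {"id": "rule-1", "synonyms": "hello, hi"},
            {"synonyms": "jacket => coat"},  # rule ID assigned automatically
        ],
    )
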
``_ :param id: The ID of the synonyms set to be created or updated. :param synonyms_set: The synonym rules definitions for the synonyms set. :param refresh: If `true`, the request will refresh the analyzers with the new synonyms set and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym set """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") if synonyms_set is None and body is None: raise ValueError("Empty value passed for parameter 'synonyms_set'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_synonyms/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if synonyms_set is not None: __body["synonyms_set"] = synonyms_set __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="synonyms.put_synonym", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("synonyms",), ) def put_synonym_rule( self, *, set_id: str, rule_id: str, synonyms: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, refresh: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a synonym rule. Create or update a synonym rule in a synonym set.

If any of the synonym rules included is invalid, the API returns an error.

When you update a synonym rule, all analyzers using the synonyms set will be reloaded automatically to reflect the new rule.

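For example (assuming a configured client named `client`; the identifiers are hypothetical, and the rule uses Solr synonym format):

    client.synonyms.put_synonym_rule(
        set_id="my-synonyms-set", rule_id="rule-1", synonyms="hello, hi, howdy"
    )
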
``_ :param set_id: The ID of the synonym set. :param rule_id: The ID of the synonym rule to be updated or created. :param synonyms: The synonym rule information definition, which must be in Solr format. :param refresh: If `true`, the request will refresh the analyzers with the new synonym rule and wait for the new synonyms to be available before returning. If `false`, analyzers will not be reloaded with the new synonym rule """ if set_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'set_id'") if rule_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'rule_id'") if synonyms is None and body is None: raise ValueError("Empty value passed for parameter 'synonyms'") __path_parts: t.Dict[str, str] = { "set_id": _quote(set_id), "rule_id": _quote(rule_id), } __path = f'/_synonyms/{__path_parts["set_id"]}/{__path_parts["rule_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if refresh is not None: __query["refresh"] = refresh if not __body: if synonyms is not None: __body["synonyms"] = synonyms __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="synonyms.put_synonym_rule", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/tasks.py000066400000000000000000000325031506101734100245560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import ( SKIP_IN_PATH, Stability, _quote, _rewrite_parameters, _stability_warning, ) class TasksClient(NamespacedClient): @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def cancel( self, *, task_id: t.Optional[str] = None, actions: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, nodes: t.Optional[t.Sequence[str]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Cancel a task.

WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.

A task may continue to run for some time after it has been cancelled because it may not be able to safely stop its current activity straight away. It is also possible that Elasticsearch must complete its work on other tasks before it can process the cancellation. The get task information API will continue to list these cancelled tasks until they complete. The cancelled flag in the response indicates that the cancellation command has been processed and the task will stop as soon as possible.

To troubleshoot why a cancelled task does not complete promptly, use the get task information API with the ?detailed parameter to identify the other tasks the system is running. You can also use the node hot threads API to obtain detailed information about the work the system is doing instead of completing the cancelled task.

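A minimal sketch (assuming a configured client named `client`; the task identifier and node name are hypothetical):

    # Cancel one task by ID
    client.tasks.cancel(task_id="oTUltX4IQMOUUVeiohTt8A:12345")
    # Or cancel matching tasks by filter
    client.tasks.cancel(actions="*reindex", nodes=["node-1"])
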
``_ :param task_id: The task identifier. :param actions: A comma-separated list or wildcard expression of actions that is used to limit the request. :param nodes: A comma-separated list of node IDs or names that is used to limit the request. :param parent_task_id: A parent task ID that is used to limit the tasks. :param wait_for_completion: If true, the request blocks until all found tasks are complete. """ __path_parts: t.Dict[str, str] if task_id not in SKIP_IN_PATH: __path_parts = {"task_id": _quote(task_id)} __path = f'/_tasks/{__path_parts["task_id"]}/_cancel' else: __path_parts = {} __path = "/_tasks/_cancel" __query: t.Dict[str, t.Any] = {} if actions is not None: __query["actions"] = actions if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="tasks.cancel", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def get( self, *, task_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get task information. Get information about a task currently running in the cluster.

WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.

If the task identifier is not found, a 404 response code indicates that there are no resources that match the request.

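For example (assuming a configured client named `client`; the task identifier is hypothetical, and the field names follow the task API response format):

    resp = client.tasks.get(
        task_id="oTUltX4IQMOUUVeiohTt8A:12345", wait_for_completion=True, timeout="30s"
    )
    print(resp["completed"], resp["task"]["action"])
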
``_ :param task_id: The task identifier. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. :param wait_for_completion: If `true`, the request blocks until the task has completed. """ if task_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'task_id'") __path_parts: t.Dict[str, str] = {"task_id": _quote(task_id)} __path = f'/_tasks/{__path_parts["task_id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="tasks.get", path_parts=__path_parts, ) @_rewrite_parameters() @_stability_warning(Stability.EXPERIMENTAL) def list( self, *, actions: t.Optional[t.Union[str, t.Sequence[str]]] = None, detailed: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, group_by: t.Optional[ t.Union[str, t.Literal["nodes", "none", "parents"]] ] = None, human: t.Optional[bool] = None, nodes: t.Optional[t.Union[str, t.Sequence[str]]] = None, parent_task_id: t.Optional[str] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get all tasks. Get information about the tasks currently running on one or more nodes in the cluster.

WARNING: The task management API is new and should still be considered a beta feature. The API may change in ways that are not backwards compatible.

Identifying running tasks

The X-Opaque-Id header, when provided in the HTTP request, is returned as a header in the response as well as in the headers field of the task information. This enables you to track certain calls or associate certain tasks with the client that started them. For example:

curl -i -H "X-Opaque-Id: 123456" "http://localhost:9200/_tasks?group_by=parents"
          

The API returns the following result:

HTTP/1.1 200 OK
          X-Opaque-Id: 123456
          content-type: application/json; charset=UTF-8
          content-length: 831

          {
            "tasks" : {
              "u5lcZHqcQhu-rUoFaqDphA:45" : {
                "node" : "u5lcZHqcQhu-rUoFaqDphA",
                "id" : 45,
                "type" : "transport",
                "action" : "cluster:monitor/tasks/lists",
                "start_time_in_millis" : 1513823752749,
                "running_time_in_nanos" : 293139,
                "cancellable" : false,
                "headers" : {
                  "X-Opaque-Id" : "123456"
                },
                "children" : [
                  {
                    "node" : "u5lcZHqcQhu-rUoFaqDphA",
                    "id" : 46,
                    "type" : "direct",
                    "action" : "cluster:monitor/tasks/lists[n]",
                    "start_time_in_millis" : 1513823752750,
                    "running_time_in_nanos" : 92133,
                    "cancellable" : false,
                    "parent_task_id" : "u5lcZHqcQhu-rUoFaqDphA:45",
                    "headers" : {
                      "X-Opaque-Id" : "123456"
                    }
                  }
                ]
              }
            }
           }
          

In this example, X-Opaque-Id: 123456 is the ID returned as part of the response header. The X-Opaque-Id in the task headers is the ID for the task that was initiated by the REST request. The X-Opaque-Id in the children headers belongs to the child task spawned by the task that the REST request initiated.

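A minimal sketch of the flow above (assuming a configured client named `client`; setting the `opaque_id` client option is assumed to send the X-Opaque-Id request header):

    resp = client.options(opaque_id="123456").tasks.list(detailed=True, group_by="parents")
    print(resp["tasks"])
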
``_ :param actions: A comma-separated list or wildcard expression of actions used to limit the request. For example, you can use `cluster:*` to retrieve all cluster-related tasks. :param detailed: If `true`, the response includes detailed information about the running tasks. This information is useful to distinguish tasks from each other but is more costly to run. :param group_by: A key that is used to group tasks in the response. The task lists can be grouped either by nodes or by parent tasks. :param nodes: A comma-separated list of node IDs or names that is used to limit the returned information. :param parent_task_id: A parent task identifier that is used to limit returned information. To return all tasks, omit this parameter or use a value of `-1`. If the parent task is not found, the API does not return a 404 response code. :param timeout: The period to wait for each node to respond. If a node does not respond before its timeout expires, the response does not include its information. However, timed out nodes are included in the `node_failures` property. :param wait_for_completion: If `true`, the request blocks until the operation is complete. """ __path_parts: t.Dict[str, str] = {} __path = "/_tasks" __query: t.Dict[str, t.Any] = {} if actions is not None: __query["actions"] = actions if detailed is not None: __query["detailed"] = detailed if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if group_by is not None: __query["group_by"] = group_by if human is not None: __query["human"] = human if nodes is not None: __query["nodes"] = nodes if parent_task_id is not None: __query["parent_task_id"] = parent_task_id if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="tasks.list", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/text_structure.py000066400000000000000000001172511506101734100265400ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class TextStructureClient(NamespacedClient): @_rewrite_parameters() def find_field_structure( self, *, field: str, index: str, column_names: t.Optional[str] = None, delimiter: t.Optional[str] = None, documents_to_sample: t.Optional[int] = None, ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] ] ] = None, grok_pattern: t.Optional[str] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, quote: t.Optional[str] = None, should_trim_fields: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, timestamp_field: t.Optional[str] = None, timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find the structure of a text field. Find the structure of a text field in an Elasticsearch index.

This API provides a starting point for extracting further information from log messages already ingested into Elasticsearch. For example, if you have ingested data into a very simple index that has just @timestamp and message fields, you can use this API to see what common structure exists in the message field.

The response from the API contains:

  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen.

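For example (assuming a configured client named `client`; the index and field names are hypothetical):

    resp = client.text_structure.find_field_structure(
        index="my-index", field="message", documents_to_sample=1000
    )
    print(resp["mappings"])  # suggested index mappings for the analyzed field
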
``_ :param field: The field that should be analyzed. :param index: The name of the index that contains the analyzed field. :param column_names: If `format` is set to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. :param delimiter: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. :param documents_to_sample: The number of documents to include in the structural analysis. The minimum value is 2. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of the meaning then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output. The intention in that situation is that a user who knows the meanings will rename the fields before using them. :param explain: If `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. :param format: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to delimited and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. :param grok_pattern: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. :param quote: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is true. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. 
If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. :param timestamp_format: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally `S` letter groups (fractional seconds) of length one to nine are supported providing they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages.
""" if field is None: raise ValueError("Empty value passed for parameter 'field'") if index is None: raise ValueError("Empty value passed for parameter 'index'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/find_field_structure" __query: t.Dict[str, t.Any] = {} if field is not None: __query["field"] = field if index is not None: __query["index"] = index if column_names is not None: __query["column_names"] = column_names if delimiter is not None: __query["delimiter"] = delimiter if documents_to_sample is not None: __query["documents_to_sample"] = documents_to_sample if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if error_trace is not None: __query["error_trace"] = error_trace if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if grok_pattern is not None: __query["grok_pattern"] = grok_pattern if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if quote is not None: __query["quote"] = quote if should_trim_fields is not None: __query["should_trim_fields"] = should_trim_fields if timeout is not None: __query["timeout"] = timeout if timestamp_field is not None: __query["timestamp_field"] = timestamp_field if timestamp_format is not None: __query["timestamp_format"] = timestamp_format __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="text_structure.find_field_structure", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("messages",), ) def find_message_structure( self, *, messages: t.Optional[t.Sequence[str]] = None, column_names: t.Optional[str] = None, delimiter: t.Optional[str] = None, ecs_compatibility: t.Optional[t.Union[str, t.Literal["disabled", "v1"]]] = None, error_trace: t.Optional[bool] = None, explain: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, format: t.Optional[ t.Union[ str, t.Literal["delimited", "ndjson", "semi_structured_text", "xml"] ] ] = None, grok_pattern: t.Optional[str] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, quote: t.Optional[str] = None, should_trim_fields: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, timestamp_field: t.Optional[str] = None, timestamp_format: t.Optional[str] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find the structure of text messages. Find the structure of a list of text messages. The messages must contain data that is suitable to be ingested into Elasticsearch.

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Use this API rather than the find text structure API if your input text has already been split up into separate messages by some other process.

The response from the API contains:

  • Sample messages.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.

If the structure finder produces unexpected results, specify the explain query parameter and an explanation will appear in the response. It helps determine why the returned structure was chosen.

``_ :param messages: The list of messages you want to analyze. :param column_names: If the format is `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. :param delimiter: If the format is `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings can rename these fields before using it. :param explain: If this parameter is set to true, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. :param format: The high level structure of the text. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. :param grok_pattern: If the format is `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. :param quote: If the format is `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. :param should_trim_fields: If the format is `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp of each record in the text. 
In particular, if the text was ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the format is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. :param timestamp_format: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a period (`.`), comma (`,`), or colon (`:`). Spacing and punctuation are also permitted, with the exception of a question mark (`?`), newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. 
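A minimal usage sketch (this assumes a configured synchronous client ``es``; the log lines are placeholders, and for semi-structured input the response includes the generated Grok pattern)::

    resp = es.text_structure.find_message_structure(
        messages=[
            "[2024-05-01T10:15:00] INFO starting service",
            "[2024-05-01T10:15:01] WARN low disk space",
        ],
    )
    print(resp["grok_pattern"])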
""" if messages is None and body is None: raise ValueError("Empty value passed for parameter 'messages'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/find_message_structure" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if column_names is not None: __query["column_names"] = column_names if delimiter is not None: __query["delimiter"] = delimiter if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if error_trace is not None: __query["error_trace"] = error_trace if explain is not None: __query["explain"] = explain if filter_path is not None: __query["filter_path"] = filter_path if format is not None: __query["format"] = format if grok_pattern is not None: __query["grok_pattern"] = grok_pattern if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if quote is not None: __query["quote"] = quote if should_trim_fields is not None: __query["should_trim_fields"] = should_trim_fields if timeout is not None: __query["timeout"] = timeout if timestamp_field is not None: __query["timestamp_field"] = timestamp_field if timestamp_format is not None: __query["timestamp_format"] = timestamp_format if not __body: if messages is not None: __body["messages"] = messages __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="text_structure.find_message_structure", path_parts=__path_parts, ) @_rewrite_parameters( body_name="text_files", ) def find_structure( self, *, text_files: t.Optional[t.Sequence[t.Any]] = None, body: t.Optional[t.Sequence[t.Any]] = None, charset: t.Optional[str] = None, column_names: t.Optional[str] = None, delimiter: t.Optional[str] = None, ecs_compatibility: t.Optional[str] = None, explain: t.Optional[bool] = None, format: t.Optional[str] = None, grok_pattern: t.Optional[str] = None, has_header_row: t.Optional[bool] = None, line_merge_size_limit: t.Optional[int] = None, lines_to_sample: t.Optional[int] = None, quote: t.Optional[str] = None, should_trim_fields: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, timestamp_field: t.Optional[str] = None, timestamp_format: t.Optional[str] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Find the structure of a text file. The text file must contain data that is suitable to be ingested into Elasticsearch.

This API provides a starting point for ingesting data into Elasticsearch in a format that is suitable for subsequent use with other Elastic Stack functionality. Unlike other Elasticsearch endpoints, the data that is posted to this endpoint does not need to be UTF-8 encoded and in JSON format. It must, however, be text; binary text formats are not currently supported. The size is limited to the Elasticsearch HTTP receive buffer size, which defaults to 100 Mb.

The response from the API contains:

  • A couple of messages from the beginning of the text.
  • Statistics that reveal the most common values for all fields detected within the text and basic numeric statistics for numeric fields.
  • Information about the structure of the text, which is useful when you write ingest configurations to index it or similarly formatted text.
  • Appropriate mappings for an Elasticsearch index, which you could use to ingest the text.

All this information can be calculated by the structure finder with no guidance. However, you can optionally override some of the decisions about the text structure by specifying one or more query parameters.
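As an illustration, a minimal sketch (this assumes a configured synchronous client ``es``; the NDJSON documents are placeholders)::

    resp = es.text_structure.find_structure(
        text_files=[
            {"name": "Leviathan Wakes", "release_date": "2011-06-02"},
            {"name": "The Left Hand of Darkness", "release_date": "1969-06-01"},
        ],
    )
    # Deduced mappings that could be used to create a destination index
    print(resp["mappings"])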

``_ :param text_files: :param charset: The text's character set. It must be a character set that is supported by the JVM that Elasticsearch uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or `EUC-JP`. If this parameter is not specified, the structure finder chooses an appropriate character set. :param column_names: If you have set format to `delimited`, you can specify the column names in a comma-separated list. If this parameter is not specified, the structure finder uses the column names from the header row of the text. If the text does not have a header row, columns are named "column1", "column2", "column3", for example. :param delimiter: If you have set `format` to `delimited`, you can specify the character used to delimit the values in each row. Only a single character is supported; the delimiter cannot have multiple characters. By default, the API considers the following possibilities: comma, tab, semi-colon, and pipe (`|`). In this default scenario, all rows must have the same number of fields for the delimited format to be detected. If you specify a delimiter, up to 10% of the rows can have a different number of columns than the first row. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. This setting primarily has an impact when a whole message Grok pattern such as `%{CATALINALOG}` matches the input. If the structure finder identifies a common structure but has no idea of meaning, then generic field names such as `path`, `ipaddress`, `field1`, and `field2` are used in the `grok_pattern` output, with the intention that a user who knows the meanings can rename these fields before using it. :param explain: If this parameter is set to `true`, the response includes a field named `explanation`, which is an array of strings that indicate how the structure finder produced its result. If the structure finder produces unexpected results for some text, use this query parameter to help you determine why the returned structure was chosen. :param format: The high level structure of the text. Valid values are `ndjson`, `xml`, `delimited`, and `semi_structured_text`. By default, the API chooses the format. In this default scenario, all rows must have the same number of fields for a delimited format to be detected. If the format is set to `delimited` and the delimiter is not set, however, the API tolerates up to 5% of rows that have a different number of columns than the first row. :param grok_pattern: If you have set `format` to `semi_structured_text`, you can specify a Grok pattern that is used to extract fields from every message in the text. The name of the timestamp field in the Grok pattern must match what is specified in the `timestamp_field` parameter. If that parameter is not specified, the name of the timestamp field in the Grok pattern must match "timestamp". If `grok_pattern` is not specified, the structure finder creates a Grok pattern. :param has_header_row: If you have set `format` to `delimited`, you can use this parameter to indicate whether the column names are in the first row of the text. If this parameter is not specified, the structure finder guesses based on the similarity of the first row of the text to other rows. :param line_merge_size_limit: The maximum number of characters in a message when lines are merged to form messages while analyzing semi-structured text. 
If you have extremely long messages, you may need to increase this, but be aware that this may lead to very long processing times if the way to group lines into messages is misdetected. :param lines_to_sample: The number of lines to include in the structural analysis, starting from the beginning of the text. The minimum is 2. If the value of this parameter is greater than the number of lines in the text, the analysis proceeds (as long as there are at least two lines in the text) for all of the lines. NOTE: The number of lines and the variation of the lines affect the speed of the analysis. For example, if you upload text where the first 1000 lines are all variations on the same message, the analysis will find more commonality than would be seen with a bigger sample. If possible, however, it is more efficient to upload sample text with more variety in the first 1000 lines than to request analysis of 100000 lines to achieve some variety. :param quote: If you have set `format` to `delimited`, you can specify the character used to quote the values in each row if they contain newlines or the delimiter character. Only a single character is supported. If this parameter is not specified, the default value is a double quote (`"`). If your delimited text format does not use quoting, a workaround is to set this argument to a character that does not appear anywhere in the sample. :param should_trim_fields: If you have set `format` to `delimited`, you can specify whether values between delimiters should have whitespace trimmed from them. If this parameter is not specified and the delimiter is pipe (`|`), the default value is `true`. Otherwise, the default value is `false`. :param timeout: The maximum amount of time that the structure analysis can take. If the analysis is still running when the timeout expires, it will be stopped. :param timestamp_field: The name of the field that contains the primary timestamp of each record in the text. In particular, if the text were ingested into an index, this is the field that would be used to populate the `@timestamp` field. If the `format` is `semi_structured_text`, this field must match the name of the appropriate extraction in the `grok_pattern`. Therefore, for semi-structured text, it is best not to specify this parameter unless `grok_pattern` is also specified. For structured text, if you specify this parameter, the field must exist within the text. If this parameter is not specified, the structure finder makes a decision about which field (if any) is the primary timestamp field. For structured text, it is not compulsory to have a timestamp in the text. :param timestamp_format: The Java time format of the timestamp field in the text. Only a subset of Java time format letter groups are supported: * `a` * `d` * `dd` * `EEE` * `EEEE` * `H` * `HH` * `h` * `M` * `MM` * `MMM` * `MMMM` * `mm` * `ss` * `XX` * `XXX` * `yy` * `yyyy` * `zzz` Additionally, `S` letter groups (fractional seconds) of length one to nine are supported, provided they occur after `ss` and are separated from the `ss` by a `.`, `,`, or `:`. Spacing and punctuation are also permitted, with the exception of `?`, newline, and carriage return, together with literal text enclosed in single quotes. For example, `MM/dd HH.mm.ss,SSSSSS 'in' yyyy` is a valid override format. 
One valuable use case for this parameter is when the format is semi-structured text, there are multiple timestamp formats in the text, and you know which format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. Another is when the timestamp format is one that the structure finder does not consider by default. If this parameter is not specified, the structure finder chooses the best format from a built-in set. If the special value `null` is specified, the structure finder will not look for a primary timestamp in the text. When the format is semi-structured text, this will result in the structure finder treating the text as single-line messages. """ if text_files is None and body is None: raise ValueError( "Empty value passed for parameters 'text_files' and 'body', one of them should be set." ) elif text_files is not None and body is not None: raise ValueError("Cannot set both 'text_files' and 'body'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/find_structure" __query: t.Dict[str, t.Any] = {} if charset is not None: __query["charset"] = charset if column_names is not None: __query["column_names"] = column_names if delimiter is not None: __query["delimiter"] = delimiter if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if explain is not None: __query["explain"] = explain if format is not None: __query["format"] = format if grok_pattern is not None: __query["grok_pattern"] = grok_pattern if has_header_row is not None: __query["has_header_row"] = has_header_row if line_merge_size_limit is not None: __query["line_merge_size_limit"] = line_merge_size_limit if lines_to_sample is not None: __query["lines_to_sample"] = lines_to_sample if quote is not None: __query["quote"] = quote if should_trim_fields is not None: __query["should_trim_fields"] = should_trim_fields if timeout is not None: __query["timeout"] = timeout if timestamp_field is not None: __query["timestamp_field"] = timestamp_field if timestamp_format is not None: __query["timestamp_format"] = timestamp_format __body = text_files if text_files is not None else body __headers = { "accept": "application/json", "content-type": "application/x-ndjson", } return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="text_structure.find_structure", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("grok_pattern", "text"), ) def test_grok_pattern( self, *, grok_pattern: t.Optional[str] = None, text: t.Optional[t.Sequence[str]] = None, ecs_compatibility: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Test a Grok pattern. Test a Grok pattern on one or more lines of text. The API indicates whether the lines match the pattern together with the offsets and lengths of the matched substrings.
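A minimal sketch (this assumes a configured synchronous client ``es``; the pattern and sample line are placeholders, and the response reports per-line match details)::

    resp = es.text_structure.test_grok_pattern(
        grok_pattern="%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{GREEDYDATA:message}",
        text=["2024-05-01T10:15:00 INFO starting service"],
    )
    print(resp)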

``_ :param grok_pattern: The Grok pattern to run on the text. :param text: The lines of text to run the Grok pattern on. :param ecs_compatibility: The mode of compatibility with ECS compliant Grok patterns. Use this parameter to specify whether to use ECS Grok patterns instead of legacy ones when the structure finder creates a Grok pattern. Valid values are `disabled` and `v1`. """ if grok_pattern is None and body is None: raise ValueError("Empty value passed for parameter 'grok_pattern'") if text is None and body is None: raise ValueError("Empty value passed for parameter 'text'") __path_parts: t.Dict[str, str] = {} __path = "/_text_structure/test_grok_pattern" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if ecs_compatibility is not None: __query["ecs_compatibility"] = ecs_compatibility if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if grok_pattern is not None: __body["grok_pattern"] = grok_pattern if text is not None: __body["text"] = text __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="text_structure.test_grok_pattern", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/transform.py000066400000000000000000001335111506101734100254450ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class TransformClient(NamespacedClient): @_rewrite_parameters() def delete_transform( self, *, transform_id: str, delete_dest_index: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a transform.

``_ :param transform_id: Identifier for the transform. :param delete_dest_index: If this value is true, the destination index is deleted together with the transform. If false, the destination index will not be deleted. :param force: If this value is false, the transform must be stopped before it can be deleted. If true, the transform is deleted regardless of its current state. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}' __query: t.Dict[str, t.Any] = {} if delete_dest_index is not None: __query["delete_dest_index"] = delete_dest_index if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="transform.delete_transform", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_transform( self, *, transform_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, exclude_generated: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get transforms. Get configuration information for transforms.

``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. :param exclude_generated: Excludes fields that were automatically added when creating the transform. This allows the configuration to be in an acceptable format to be retrieved and then added to another cluster. :param from_: Skips the specified number of transforms. :param size: Specifies the maximum number of transforms to obtain. """ __path_parts: t.Dict[str, str] if transform_id not in SKIP_IN_PATH: __path_parts = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}' else: __path_parts = {} __path = "/_transform" __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if exclude_generated is not None: __query["exclude_generated"] = exclude_generated if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="transform.get_transform", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def get_transform_stats( self, *, transform_id: t.Union[str, t.Sequence[str]], allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, size: t.Optional[int] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get transform stats.

Get usage information for transforms.

``_ :param transform_id: Identifier for the transform. It can be a transform identifier or a wildcard expression. You can get information for all transforms by using `_all`, by specifying `*` as the `<transform_id>`, or by omitting the `<transform_id>`. :param allow_no_match: Specifies what to do when the request: 1. Contains wildcard expressions and there are no transforms that match. 2. Contains the _all string or no identifiers and there are no matches. 3. Contains wildcard expressions and there are only partial matches. If this parameter is false, the request returns a 404 status code when there are no matches or only partial matches. :param from_: Skips the specified number of transforms. :param size: Specifies the maximum number of transforms to obtain. :param timeout: Controls the time to wait for the stats. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_stats' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if size is not None: __query["size"] = size if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="transform.get_transform_stats", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "dest", "frequency", "latest", "pivot", "retention_policy", "settings", "source", "sync", ), ) def preview_transform( self, *, transform_id: t.Optional[str] = None, description: t.Optional[str] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, latest: t.Optional[t.Mapping[str, t.Any]] = None, pivot: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, retention_policy: t.Optional[t.Mapping[str, t.Any]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, sync: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Preview a transform. Generates a preview of the results that you will get when you create a transform with the same configuration.

It returns a maximum of 100 results. The calculations are based on all the current data in the source index. It also generates a list of mappings and settings for the destination index. These values are determined based on the field types of the source index and the transform aggregations.
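A minimal sketch of previewing a pivot (this assumes a configured synchronous client ``es``; the index and field names are placeholders)::

    resp = es.transform.preview_transform(
        source={"index": "ecommerce-orders"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spend": {"sum": {"field": "order_total"}}},
        },
    )
    print(resp["preview"])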

``_ :param transform_id: Identifier for the transform to preview. If you specify this path parameter, you cannot provide transform configuration details in the request body. :param description: Free text description of the transform. :param dest: The destination for the transform. :param frequency: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. :param latest: The latest method transforms the data by finding the latest document for each unique key. :param pivot: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. :param retention_policy: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. :param settings: Defines optional transform settings. :param source: The source of the data for the transform. :param sync: Defines the properties transforms require to run continuously. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] if transform_id not in SKIP_IN_PATH: __path_parts = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_preview' else: __path_parts = {} __path = "/_transform/_preview" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if description is not None: __body["description"] = description if dest is not None: __body["dest"] = dest if frequency is not None: __body["frequency"] = frequency if latest is not None: __body["latest"] = latest if pivot is not None: __body["pivot"] = pivot if retention_policy is not None: __body["retention_policy"] = retention_policy if settings is not None: __body["settings"] = settings if source is not None: __body["source"] = source if sync is not None: __body["sync"] = sync if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="transform.preview_transform", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "dest", "source", "description", "frequency", "latest", "meta", "pivot", "retention_policy", "settings", "sync", ), parameter_aliases={"_meta": "meta"}, ) def put_transform( self, *, transform_id: str, dest: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, defer_validation: t.Optional[bool] = None, description: t.Optional[str] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, latest: t.Optional[t.Mapping[str, t.Any]] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pivot: t.Optional[t.Mapping[str, 
t.Any]] = None, pretty: t.Optional[bool] = None, retention_policy: t.Optional[t.Mapping[str, t.Any]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, sync: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create a transform. Creates a transform.

A transform copies data from source indices, transforms it, and persists it into an entity-centric destination index. You can also think of the destination index as a two-dimensional tabular data structure (known as a data frame). The ID for each document in the data frame is generated from a hash of the entity, so there is a unique row per entity.

You must choose either the latest or pivot method for your transform; you cannot use both in a single transform. If you choose to use the pivot method for your transform, the entities are defined by the set of group_by fields in the pivot object. If you choose to use the latest method, the entities are defined by the unique_key field values in the latest object.

You must have create_index, index, and read privileges on the destination index and read and view_index_metadata privileges on the source indices. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.

NOTE: You must use Kibana or this API to create a transform. Do not add a transform directly into any .transform-internal* indices using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users any privileges on .transform-internal* indices. If you used transforms prior to 7.5, also do not give users any privileges on .data-frame-internal* indices.

``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. :param dest: The destination for the transform. :param source: The source of the data for the transform. :param defer_validation: When the transform is created, a series of validations occur to ensure its success. For example, there is a check for the existence of the source indices and a check that the destination index is not part of the source index pattern. You can use this parameter to skip the checks, for example when the source index does not exist until after the transform is created. The validations are always run when you start the transform, however, with the exception of privilege checks. :param description: Free text description of the transform. :param frequency: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is `1s` and the maximum is `1h`. :param latest: The latest method transforms the data by finding the latest document for each unique key. :param meta: Defines optional transform metadata. :param pivot: The pivot method transforms the data by aggregating and grouping it. These objects define the group by fields and the aggregation to reduce the data. :param retention_policy: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. :param settings: Defines optional transform settings. :param sync: Defines the properties transforms require to run continuously. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. 
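A minimal sketch that creates a continuous pivot transform (this assumes a configured synchronous client ``es``; every identifier, index, and field name is a placeholder)::

    es.transform.put_transform(
        transform_id="ecommerce-customer-spend",
        source={"index": "ecommerce-orders"},
        dest={"index": "customer-spend"},
        pivot={
            "group_by": {"customer_id": {"terms": {"field": "customer_id"}}},
            "aggregations": {"total_spend": {"sum": {"field": "order_total"}}},
        },
        sync={"time": {"field": "@timestamp", "delay": "60s"}},
        frequency="5m",
    )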
""" if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") if dest is None and body is None: raise ValueError("Empty value passed for parameter 'dest'") if source is None and body is None: raise ValueError("Empty value passed for parameter 'source'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if defer_validation is not None: __query["defer_validation"] = defer_validation if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if dest is not None: __body["dest"] = dest if source is not None: __body["source"] = source if description is not None: __body["description"] = description if frequency is not None: __body["frequency"] = frequency if latest is not None: __body["latest"] = latest if meta is not None: __body["_meta"] = meta if pivot is not None: __body["pivot"] = pivot if retention_policy is not None: __body["retention_policy"] = retention_policy if settings is not None: __body["settings"] = settings if sync is not None: __body["sync"] = sync __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="transform.put_transform", path_parts=__path_parts, ) @_rewrite_parameters() def reset_transform( self, *, transform_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Reset a transform.

Before you can reset it, you must stop it; alternatively, use the force query parameter. If the destination index was created by the transform, it is deleted.

``_ :param transform_id: Identifier for the transform. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 character limit and must start and end with alphanumeric characters. :param force: If this value is `true`, the transform is reset regardless of its current state. If it's `false`, the transform must be stopped before it can be reset. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_reset' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.reset_transform", path_parts=__path_parts, ) @_rewrite_parameters() def schedule_now_transform( self, *, transform_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Schedule a transform to start now.

Instantly run a transform to process data. If you call this API, the transform will process the new data instantly, without waiting for the configured frequency interval. After the API is called, the transform will be processed again at now + frequency unless the API is called again in the meantime.
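A minimal sketch (this assumes a configured synchronous client ``es`` and an existing transform; the identifier is a placeholder)::

    es.transform.schedule_now_transform(transform_id="ecommerce-customer-spend")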

``_ :param transform_id: Identifier for the transform. :param timeout: Controls the time to wait for the scheduling to take place """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_schedule_now' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.schedule_now_transform", path_parts=__path_parts, ) @_rewrite_parameters() def set_upgrade_mode( self, *, enabled: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Set upgrade_mode for transform indices. Sets a cluster-wide upgrade_mode setting that prepares transform indices for an upgrade. When upgrading your cluster, in some circumstances you must restart your nodes and reindex your transform indices. In those circumstances, there must be no transforms running. You can close the transforms, do the upgrade, then open all the transforms again. Alternatively, you can use this API to temporarily halt tasks associated with the transforms and prevent new transforms from opening. You can also use this API during upgrades that do not require you to reindex your transform indices, though stopping transforms is not a requirement in that case. You can see the current value for the upgrade_mode setting by using the get transform info API.
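A minimal sketch (this assumes a configured synchronous client ``es``)::

    es.transform.set_upgrade_mode(enabled=True, timeout="10m")
    # ... upgrade the cluster ...
    es.transform.set_upgrade_mode(enabled=False)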

``_ :param enabled: When `true`, it enables `upgrade_mode` which temporarily halts all transform tasks and prohibits new transform tasks from starting. :param timeout: The time to wait for the request to be completed. """ __path_parts: t.Dict[str, str] = {} __path = "/_transform/set_upgrade_mode" __query: t.Dict[str, t.Any] = {} if enabled is not None: __query["enabled"] = enabled if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.set_upgrade_mode", path_parts=__path_parts, ) @_rewrite_parameters( parameter_aliases={"from": "from_"}, ) def start_transform( self, *, transform_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[str] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start a transform.

When you start a transform, it creates the destination index if it does not already exist. The number_of_shards is set to 1 and the auto_expand_replicas is set to 0-1. If it is a pivot transform, it deduces the mapping definitions for the destination index from the source indices and the transform aggregations. If fields in the destination index are derived from scripts (as in the case of scripted_metric or bucket_script aggregations), the transform uses dynamic mappings unless an index template exists. If it is a latest transform, it does not deduce mapping definitions; it uses dynamic mappings. To use explicit mappings, create the destination index before you start the transform. Alternatively, you can create an index template, though it does not affect the deduced mappings in a pivot transform.

When the transform starts, a series of validations occur to ensure its success. If you deferred validation when you created the transform, they occur when you start the transform, with the exception of privilege checks. When Elasticsearch security features are enabled, the transform remembers which roles the user that created it had at the time of creation and uses those same roles. If those roles do not have the required privileges on the source and destination indices, the transform fails when it attempts unauthorized operations.
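A minimal sketch (this assumes a configured synchronous client ``es`` and an existing transform; the identifier is a placeholder)::

    es.transform.start_transform(
        transform_id="ecommerce-customer-spend",
        timeout="30s",
    )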

``_ :param transform_id: Identifier for the transform. :param from_: Restricts the set of transformed entities to those changed after this time. Relative times like now-30d are supported. Only applicable for continuous transforms. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_start' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if from_ is not None: __query["from"] = from_ if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.start_transform", path_parts=__path_parts, ) @_rewrite_parameters() def stop_transform( self, *, transform_id: str, allow_no_match: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, force: t.Optional[bool] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, wait_for_checkpoint: t.Optional[bool] = None, wait_for_completion: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop transforms. Stops one or more transforms.
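A minimal sketch (this assumes a configured synchronous client ``es``; the identifier is a placeholder)::

    es.transform.stop_transform(
        transform_id="ecommerce-customer-spend",
        wait_for_checkpoint=True,
        wait_for_completion=True,
        timeout="120s",
    )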

``_ :param transform_id: Identifier for the transform. To stop multiple transforms, use a comma-separated list or a wildcard expression. To stop all transforms, use `_all` or `*` as the identifier. :param allow_no_match: Specifies what to do when the request: contains wildcard expressions and there are no transforms that match; contains the `_all` string or no identifiers and there are no matches; contains wildcard expressions and there are only partial matches. If it is true, the API returns a successful acknowledgement message when there are no matches. When there are only partial matches, the API stops the appropriate transforms. If it is false, the request returns a 404 status code when there are no matches or only partial matches. :param force: If it is true, the API forcefully stops the transforms. :param timeout: Period to wait for a response when `wait_for_completion` is `true`. If no response is received before the timeout expires, the request returns a timeout exception. However, the request continues processing and eventually moves the transform to a STOPPED state. :param wait_for_checkpoint: If it is true, the transform does not completely stop until the current checkpoint is completed. If it is false, the transform stops as soon as possible. :param wait_for_completion: If it is true, the API blocks until the indexer state completely stops. If it is false, the API returns immediately and the indexer is stopped asynchronously in the background. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_stop' __query: t.Dict[str, t.Any] = {} if allow_no_match is not None: __query["allow_no_match"] = allow_no_match if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if force is not None: __query["force"] = force if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if wait_for_checkpoint is not None: __query["wait_for_checkpoint"] = wait_for_checkpoint if wait_for_completion is not None: __query["wait_for_completion"] = wait_for_completion __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.stop_transform", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "description", "dest", "frequency", "meta", "retention_policy", "settings", "source", "sync", ), parameter_aliases={"_meta": "meta"}, ) def update_transform( self, *, transform_id: str, defer_validation: t.Optional[bool] = None, description: t.Optional[str] = None, dest: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, frequency: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, human: t.Optional[bool] = None, meta: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, retention_policy: t.Optional[t.Union[None, t.Mapping[str, t.Any]]] = None, settings: t.Optional[t.Mapping[str, t.Any]] = None, source: t.Optional[t.Mapping[str, t.Any]] = None, sync: t.Optional[t.Mapping[str, t.Any]] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> 
ObjectApiResponse[t.Any]: """ .. raw:: html

Update a transform. Updates certain properties of a transform.

All updated properties except description do not take effect until after the transform starts the next checkpoint, which ensures data consistency in each checkpoint. To use this API, you must have read and view_index_metadata privileges for the source indices. You must also have index and read privileges for the destination index. When Elasticsearch security features are enabled, the transform remembers which roles the user who updated it had at the time of update and runs with those privileges.
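A minimal sketch (this assumes a configured synchronous client ``es``; the identifier and values are placeholders)::

    es.transform.update_transform(
        transform_id="ecommerce-customer-spend",
        description="Customer spend rollup, refreshed every 10 minutes",
        frequency="10m",
    )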

``_ :param transform_id: Identifier for the transform. :param defer_validation: When true, deferrable validations are not run. This behavior may be desired if the source index does not exist until after the transform is created. :param description: Free text description of the transform. :param dest: The destination for the transform. :param frequency: The interval between checks for changes in the source indices when the transform is running continuously. Also determines the retry interval in the event of transient failures while the transform is searching or indexing. The minimum value is 1s and the maximum is 1h. :param meta: Defines optional transform metadata. :param retention_policy: Defines a retention policy for the transform. Data that meets the defined criteria is deleted from the destination index. :param settings: Defines optional transform settings. :param source: The source of the data for the transform. :param sync: Defines the properties transforms require to run continuously. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ if transform_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'transform_id'") __path_parts: t.Dict[str, str] = {"transform_id": _quote(transform_id)} __path = f'/_transform/{__path_parts["transform_id"]}/_update' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if defer_validation is not None: __query["defer_validation"] = defer_validation if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if description is not None: __body["description"] = description if dest is not None: __body["dest"] = dest if frequency is not None: __body["frequency"] = frequency if meta is not None: __body["_meta"] = meta if retention_policy is not None: __body["retention_policy"] = retention_policy if settings is not None: __body["settings"] = settings if source is not None: __body["source"] = source if sync is not None: __body["sync"] = sync __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="transform.update_transform", path_parts=__path_parts, ) @_rewrite_parameters() def upgrade_transforms( self, *, dry_run: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Upgrade all transforms.

Transforms are compatible across minor versions and between supported major versions. However, over time, the format of transform configuration information may change. This API identifies transforms that have a legacy configuration format and upgrades them to the latest version. It also cleans up the internal data structures that store the transform state and checkpoints. The upgrade does not affect the source and destination indices. The upgrade also does not affect the roles that transforms use when Elasticsearch security features are enabled; the role used to read source data and write to the destination index remains unchanged.

If a transform upgrade step fails, the upgrade stops and an error is returned about the underlying issue. Resolve the issue, then re-run the process. A summary is returned when the upgrade is finished.

To ensure continuous transforms remain running during a major version upgrade of the cluster – for example, from 7.16 to 8.0 – it is recommended to upgrade transforms before upgrading the cluster. It is also a good idea to take a recent cluster backup prior to the upgrade.
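A minimal dry-run sketch using this client method (the connection URL is illustrative)::

    from elasticsearch import Elasticsearch

    client = Elasticsearch("https://localhost:9200")  # illustrative connection settings

    # Check which transforms need to be upgraded without applying any changes.
    resp = client.transform.upgrade_transforms(dry_run=True)
    print(resp)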

``_ :param dry_run: When true, the request checks for updates but does not run them. :param timeout: Period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_transform/_upgrade" __query: t.Dict[str, t.Any] = {} if dry_run is not None: __query["dry_run"] = dry_run if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="transform.upgrade_transforms", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/utils.py000066400000000000000000000414431506101734100245740ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import base64 import inspect import urllib.parse import warnings from datetime import date, datetime from enum import Enum, auto from functools import wraps from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, Collection, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Type, TypeVar, Union, ) from elastic_transport import ( AsyncTransport, HttpHeaders, NodeConfig, RequestsHttpNode, SniffOptions, Transport, ) from elastic_transport.client_utils import ( DEFAULT, client_meta_version, create_user_agent, parse_cloud_id, url_to_node_config, ) from ..._version import __versionstr__ from ...compat import to_bytes, to_str, warn_stacklevel from ...exceptions import GeneralAvailabilityWarning if TYPE_CHECKING: from ._base import NamespacedClient # parts of URL to be omitted SKIP_IN_PATH: Collection[Any] = (None, "", b"", [], ()) # To be passed to 'client_meta_service' on the Transport CLIENT_META_SERVICE = ("es", client_meta_version(__versionstr__)) # Default User-Agent used by the client USER_AGENT = create_user_agent("elasticsearch-py", __versionstr__) class Stability(Enum): STABLE = auto() BETA = auto() EXPERIMENTAL = auto() _TYPE_HOSTS = Union[ str, Sequence[Union[str, Mapping[str, Union[str, int]], NodeConfig]] ] _TYPE_BODY = Union[bytes, str, Dict[str, Any]] _TYPE_ASYNC_SNIFF_CALLBACK = Callable[ [AsyncTransport, SniffOptions], Awaitable[List[NodeConfig]] ] _TYPE_SYNC_SNIFF_CALLBACK = Callable[[Transport, SniffOptions], List[NodeConfig]] _TRANSPORT_OPTIONS = { "api_key", "http_auth", "request_timeout", "opaque_id", "headers", "ignore", } F = TypeVar("F", bound=Callable[..., Any]) def client_node_configs( hosts: Optional[_TYPE_HOSTS], cloud_id: Optional[str], requests_session_auth: Optional[Any] = None, **kwargs: Any, ) -> List[NodeConfig]: if cloud_id is not None: if hosts is not None: raise ValueError( "The 'cloud_id' and 'hosts' parameters are mutually exclusive" ) node_configs = cloud_id_to_node_configs(cloud_id) else: assert hosts is not None node_configs = hosts_to_node_configs(hosts) # Remove all values which are 'DEFAULT' to avoid overwriting actual defaults. node_options = {k: v for k, v in kwargs.items() if v is not DEFAULT} # Set the 'User-Agent' default header. headers = HttpHeaders(node_options.pop("headers", ())) headers.setdefault("user-agent", USER_AGENT) node_options["headers"] = headers # If a custom Requests AuthBase is passed we set that via '_extras'. 
if requests_session_auth is not None: node_options.setdefault("_extras", {})[ "requests.session.auth" ] = requests_session_auth def apply_node_options(node_config: NodeConfig) -> NodeConfig: """Needs special handling of headers since .replace() wipes out existing headers""" headers = node_config.headers.copy() # type: ignore[attr-defined] headers_to_add = node_options.pop("headers", ()) if headers_to_add: headers.update(headers_to_add) headers.setdefault("user-agent", USER_AGENT) headers.freeze() node_options["headers"] = headers return node_config.replace(**node_options) return [apply_node_options(node_config) for node_config in node_configs] def hosts_to_node_configs(hosts: _TYPE_HOSTS) -> List[NodeConfig]: """Transforms the many formats of 'hosts' into NodeConfigs""" # To make the logic here simpler we reroute everything to be List[X] if isinstance(hosts, str): return hosts_to_node_configs([hosts]) node_configs: List[NodeConfig] = [] for host in hosts: if isinstance(host, NodeConfig): node_configs.append(host) elif isinstance(host, str): node_configs.append(url_to_node_config(host)) elif isinstance(host, Mapping): node_configs.append(host_mapping_to_node_config(host)) else: raise ValueError( "'hosts' must be a list of URLs, NodeConfigs, or dictionaries" ) return node_configs def host_mapping_to_node_config(host: Mapping[str, Union[str, int]]) -> NodeConfig: """Converts an old-style dictionary host specification to a NodeConfig""" allow_hosts_keys = { "scheme", "host", "port", "path_prefix", } disallowed_keys = set(host.keys()).difference(allow_hosts_keys) if disallowed_keys: bad_keys_used = "', '".join(sorted(disallowed_keys)) allowed_keys = "', '".join(sorted(allow_hosts_keys)) raise ValueError( f"Can't specify the options '{bad_keys_used}' via a " f"dictionary in 'hosts', only '{allowed_keys}' options " "are allowed" ) options = dict(host) return NodeConfig(**options) # type: ignore[arg-type] def cloud_id_to_node_configs(cloud_id: str) -> List[NodeConfig]: """Transforms an Elastic Cloud ID into a NodeConfig""" es_addr = parse_cloud_id(cloud_id).es_address if es_addr is None or not all(es_addr): raise ValueError("Cloud ID missing host and port information for Elasticsearch") host, port = es_addr return [ NodeConfig( scheme="https", host=host, port=port, http_compress=True, ) ] def _base64_auth_header(auth_value: Union[str, List[str], Tuple[str, str]]) -> str: """Takes either a 2-tuple or a base64-encoded string and returns a base64-encoded string to be used as an HTTP authorization header. """ if isinstance(auth_value, (list, tuple)): return base64.b64encode(to_bytes(":".join(auth_value))).decode("ascii") return to_str(auth_value) def _escape(value: Any) -> str: """ Escape a single value of a URL string or a query parameter. If it is a list or tuple, turn it into a comma-separated string first. 
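    For illustration, a few of the conversions described above (a sketch;
    this is an internal helper, not a public API)::

        _escape(["a", "b", "c"])   # list -> comma-separated string: "a,b,c"
        _escape(True)              # bool -> lowercase string: "true"
        _escape(date(2024, 1, 1))  # date -> isoformat: "2024-01-01"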
""" # make sequences into comma-separated stings if isinstance(value, (list, tuple)): value = ",".join([_escape(item) for item in value]) # dates and datetimes into isoformat elif isinstance(value, (date, datetime)): value = value.isoformat() # make bools into true/false strings elif isinstance(value, bool): value = str(value).lower() elif isinstance(value, bytes): return value.decode("utf-8", "surrogatepass") if not isinstance(value, str): return str(value) return value def _quote(value: Any) -> str: return urllib.parse.quote(_escape(value), ",*") def _quote_query(query: Mapping[str, Any]) -> str: return "&".join([f"{k}={_quote(v)}" for k, v in query.items()]) def _merge_kwargs_no_duplicates(kwargs: Dict[str, Any], values: Dict[str, Any]) -> None: for key, val in values.items(): if key in kwargs: raise ValueError( f"Received multiple values for '{key}', specify parameters " "directly instead of using 'params'" ) kwargs[key] = val def _merge_body_fields_no_duplicates( body: _TYPE_BODY, kwargs: Dict[str, Any], body_fields: Tuple[str, ...] ) -> bool: mixed_body_and_params = False for key in list(kwargs.keys()): if key in body_fields: if isinstance(body, (str, bytes)): raise ValueError( "Couldn't merge 'body' with other parameters as it wasn't a mapping." ) if key in body: raise ValueError( f"Received multiple values for '{key}', specify parameters " "using either body or parameters, not both." ) warnings.warn( f"Received '{key}' via a specific parameter in the presence of a " "'body' parameter, which is deprecated and will be removed in a future " "version. Instead, use only 'body' or only specific parameters.", category=DeprecationWarning, stacklevel=warn_stacklevel(), ) body[key] = kwargs.pop(key) mixed_body_and_params = True return mixed_body_and_params def _rewrite_parameters( body_name: Optional[str] = None, body_fields: Optional[Tuple[str, ...]] = None, parameter_aliases: Optional[Dict[str, str]] = None, ignore_deprecated_options: Optional[Set[str]] = None, ) -> Callable[[F], F]: def wrapper(api: F) -> F: @wraps(api) def wrapped(*args: Any, **kwargs: Any) -> Any: # Let's give a nicer error message when users pass positional arguments. if len(args) >= 2: raise TypeError( "Positional arguments can't be used with Elasticsearch API methods. " "Instead only use keyword arguments." ) # We merge 'params' first as transport options can be specified using params. if "params" in kwargs and ( not ignore_deprecated_options or "params" not in ignore_deprecated_options ): params = kwargs.pop("params") if params: if not hasattr(params, "items"): raise ValueError( "Couldn't merge 'params' with other parameters as it wasn't a mapping. " "Instead of using 'params' use individual API parameters" ) warnings.warn( "The 'params' parameter is deprecated and will be removed " "in a future version. Instead use individual parameters.", category=DeprecationWarning, stacklevel=warn_stacklevel(), ) _merge_kwargs_no_duplicates(kwargs, params) maybe_transport_options = _TRANSPORT_OPTIONS.intersection(kwargs) if maybe_transport_options: transport_options = {} for option in maybe_transport_options: if ( ignore_deprecated_options and option in ignore_deprecated_options ): continue try: option_rename = option if option == "ignore": option_rename = "ignore_status" transport_options[option_rename] = kwargs.pop(option) except KeyError: pass if transport_options: warnings.warn( "Passing transport options in the API method is deprecated. 
Use 'Elasticsearch.options()' instead.", category=DeprecationWarning, stacklevel=warn_stacklevel(), ) client = args[0] # Namespaced clients need to unwrapped. namespaced_client: Optional[Type["NamespacedClient"]] = None if hasattr(client, "_client"): namespaced_client = type(client) client = client._client client = client.options(**transport_options) # Re-wrap the client if we unwrapped due to being namespaced. if namespaced_client is not None: client = namespaced_client(client) args = (client,) + args[1:] if "body" in kwargs and ( not ignore_deprecated_options or "body" not in ignore_deprecated_options ): body: Optional[_TYPE_BODY] = kwargs.pop("body") mixed_body_and_params = False if body is not None: if body_name: if body_name in kwargs: raise TypeError( f"Can't use '{body_name}' and 'body' parameters together because '{body_name}' " "is an alias for 'body'. Instead you should only use the " f"'{body_name}' parameter. See https://github.com/elastic/elasticsearch-py/" "issues/1698 for more information" ) kwargs[body_name] = body elif body_fields is not None: mixed_body_and_params = _merge_body_fields_no_duplicates( body, kwargs, body_fields ) kwargs["body"] = body if parameter_aliases and not isinstance(body, (str, bytes)): for alias, rename_to in parameter_aliases.items(): if rename_to in body: body[alias] = body.pop(rename_to) # If body and params are mixed, the alias may come from a param, # in which case the warning below will not make sense. if not mixed_body_and_params: warnings.warn( f"Using '{rename_to}' alias in 'body' is deprecated and will be removed " f"in a future version of elasticsearch-py. Use '{alias}' directly instead. " "See https://github.com/elastic/elasticsearch-py/issues/1698 for more information", category=DeprecationWarning, stacklevel=2, ) if parameter_aliases: for alias, rename_to in parameter_aliases.items(): try: kwargs[rename_to] = kwargs.pop(alias) except KeyError: pass return api(*args, **kwargs) return wrapped # type: ignore[return-value] return wrapper def _stability_warning( stability: Stability, version: Optional[str] = None, message: Optional[str] = None, ) -> Callable[[F], F]: def wrapper(api: F) -> F: @wraps(api) def wrapped(*args: Any, **kwargs: Any) -> Any: if stability == Stability.BETA: warnings.warn( "This API is in beta and is subject to change. " "The design and code is less mature than official GA features and is being provided as-is with no warranties. " "Beta features are not subject to the support SLA of official GA features.", category=GeneralAvailabilityWarning, stacklevel=warn_stacklevel(), ) elif stability == Stability.EXPERIMENTAL: warnings.warn( "This API is in technical preview and may be changed or removed in a future release. 
" "Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.", category=GeneralAvailabilityWarning, stacklevel=warn_stacklevel(), ) return api(*args, **kwargs) return wrapped # type: ignore[return-value] return wrapper def is_requests_http_auth(http_auth: Any) -> bool: """Detect if an http_auth value is a custom Requests auth object""" try: from requests.auth import AuthBase return isinstance(http_auth, AuthBase) except ImportError: pass return False def is_requests_node_class(node_class: Any) -> bool: """Detect if 'RequestsHttpNode' would be used given the setting of 'node_class'""" return ( node_class is not None and node_class is not DEFAULT and ( node_class == "requests" or ( inspect.isclass(node_class) and issubclass(node_class, RequestsHttpNode) ) ) ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/watcher.py000066400000000000000000001124201506101734100250630ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import SKIP_IN_PATH, _quote, _rewrite_parameters class WatcherClient(NamespacedClient): @_rewrite_parameters() def ack_watch( self, *, watch_id: str, action_id: t.Optional[t.Union[str, t.Sequence[str]]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Acknowledge a watch. Acknowledging a watch enables you to manually throttle the execution of the watch's actions.

The acknowledgement state of an action is stored in the status.actions.<id>.ack.state structure.

IMPORTANT: If the specified watch is currently being executed, this API will return an error. The reason for this behavior is to prevent overwriting the watch status from a watch execution.

Acknowledging an action throttles further executions of that action until its ack.state is reset to awaits_successful_execution. This happens when the condition of the watch is not met (the condition evaluates to false). To demonstrate how throttling works in practice and how it can be configured for individual actions within a watch, refer to the external documentation.
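For example, acknowledging a single action of a watch might look like this (the watch and action identifiers are illustrative, and ``client`` is assumed to be a connected ``Elasticsearch`` instance)::

    resp = client.watcher.ack_watch(watch_id="my_watch", action_id="email_admin")
    # Inspect the acknowledgement state described above.
    print(resp["status"]["actions"]["email_admin"]["ack"]["state"])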

``_ :param watch_id: The watch identifier. :param action_id: A comma-separated list of the action identifiers to acknowledge. If you omit this parameter, all of the actions of the watch are acknowledged. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") __path_parts: t.Dict[str, str] if watch_id not in SKIP_IN_PATH and action_id not in SKIP_IN_PATH: __path_parts = { "watch_id": _quote(watch_id), "action_id": _quote(action_id), } __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_ack/{__path_parts["action_id"]}' elif watch_id not in SKIP_IN_PATH: __path_parts = {"watch_id": _quote(watch_id)} __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_ack' else: raise ValueError("Couldn't find a path for the given parameters") __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="watcher.ack_watch", path_parts=__path_parts, ) @_rewrite_parameters() def activate_watch( self, *, watch_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Activate a watch. A watch can be either active or inactive.
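A minimal usage sketch, assuming a connected client instance named ``client`` and an existing watch with the illustrative identifier ``my_watch``::

    client.watcher.activate_watch(watch_id="my_watch")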

``_ :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") __path_parts: t.Dict[str, str] = {"watch_id": _quote(watch_id)} __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_activate' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="watcher.activate_watch", path_parts=__path_parts, ) @_rewrite_parameters() def deactivate_watch( self, *, watch_id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Deactivate a watch. A watch can be either active or inactive.

``_ :param watch_id: The watch identifier. """ if watch_id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'watch_id'") __path_parts: t.Dict[str, str] = {"watch_id": _quote(watch_id)} __path = f'/_watcher/watch/{__path_parts["watch_id"]}/_deactivate' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, endpoint_id="watcher.deactivate_watch", path_parts=__path_parts, ) @_rewrite_parameters() def delete_watch( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Delete a watch. When the watch is removed, the document representing the watch in the .watches index is gone and it will never be run again.

Deleting a watch does not delete any watch execution records related to this watch from the watch history.

IMPORTANT: Deleting a watch must be done by using only this API. Do not delete the watch directly from the .watches index using the Elasticsearch delete document API. When Elasticsearch security features are enabled, make sure no write privileges are granted to anyone for the .watches index.
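For example (the watch identifier is illustrative)::

    resp = client.watcher.delete_watch(id="my_watch")
    # resp["found"] is expected to be true when the watch existed and was removed.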

``_ :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "DELETE", __path, params=__query, headers=__headers, endpoint_id="watcher.delete_watch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "action_modes", "alternative_input", "ignore_condition", "record_execution", "simulated_actions", "trigger_data", "watch", ), ) def execute_watch( self, *, id: t.Optional[str] = None, action_modes: t.Optional[ t.Mapping[ str, t.Union[ str, t.Literal[ "execute", "force_execute", "force_simulate", "simulate", "skip" ], ], ] ] = None, alternative_input: t.Optional[t.Mapping[str, t.Any]] = None, debug: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, ignore_condition: t.Optional[bool] = None, pretty: t.Optional[bool] = None, record_execution: t.Optional[bool] = None, simulated_actions: t.Optional[t.Mapping[str, t.Any]] = None, trigger_data: t.Optional[t.Mapping[str, t.Any]] = None, watch: t.Optional[t.Mapping[str, t.Any]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Run a watch. This API can be used to force execution of the watch outside of its triggering logic or to simulate the watch execution for debugging purposes.

For testing and debugging purposes, you also have fine-grained control on how the watch runs. You can run the watch without running all of its actions or alternatively by simulating them. You can also force execution by ignoring the watch condition and control whether a watch record would be written to the watch history after it runs.

You can use the run watch API to run watches that are not yet registered by specifying the watch definition inline. This serves as a great tool for testing and debugging your watches prior to adding them to Watcher.

When Elasticsearch security features are enabled on your cluster, watches are run with the privileges of the user that stored the watches. If your user is allowed to read index a, but not index b, then the exact same set of rules will apply during execution of a watch.

When using the run watch API, the authorization data of the user that called the API will be used as a base, instead of that of the user who stored the watch. Refer to the external documentation for examples of watch execution requests, including existing, customized, and inline watches.
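A sketch of a simulated run that ignores the watch condition and does not persist a watch record (the watch identifier is illustrative, and ``client`` is assumed to be a connected ``Elasticsearch`` instance)::

    resp = client.watcher.execute_watch(
        id="my_watch",
        ignore_condition=True,
        record_execution=False,
    )
    print(resp["watch_record"]["state"])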

``_ :param id: The watch identifier. :param action_modes: Determines how to handle the watch actions as part of the watch execution. :param alternative_input: When present, the watch uses this object as a payload instead of executing its own input. :param debug: Defines whether the watch runs in debug mode. :param ignore_condition: When set to `true`, the watch execution uses the always condition. This can also be specified as an HTTP parameter. :param record_execution: When set to `true`, the watch record representing the watch execution result is persisted to the `.watcher-history` index for the current time. In addition, the status of the watch is updated, possibly throttling subsequent runs. This can also be specified as an HTTP parameter. :param simulated_actions: :param trigger_data: This structure is parsed as the data of the trigger event that will be used during the watch execution. :param watch: When present, this watch is used instead of the one specified in the request. This watch is not persisted to the index and `record_execution` cannot be set. """ __path_parts: t.Dict[str, str] if id not in SKIP_IN_PATH: __path_parts = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}/_execute' else: __path_parts = {} __path = "/_watcher/watch/_execute" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if debug is not None: __query["debug"] = debug if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if action_modes is not None: __body["action_modes"] = action_modes if alternative_input is not None: __body["alternative_input"] = alternative_input if ignore_condition is not None: __body["ignore_condition"] = ignore_condition if record_execution is not None: __body["record_execution"] = record_execution if simulated_actions is not None: __body["simulated_actions"] = simulated_actions if trigger_data is not None: __body["trigger_data"] = trigger_data if watch is not None: __body["watch"] = watch if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.execute_watch", path_parts=__path_parts, ) @_rewrite_parameters() def get_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get Watcher index settings. Get settings for the Watcher internal index (.watches). Only a subset of settings are shown, for example index.auto_expand_replicas and index.number_of_replicas.
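For example::

    settings = client.watcher.get_settings()
    print(settings)  # e.g. the index.auto_expand_replicas value, if set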

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/settings" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="watcher.get_settings", path_parts=__path_parts, ) @_rewrite_parameters() def get_watch( self, *, id: str, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get a watch.

``_ :param id: The watch identifier. """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="watcher.get_watch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=( "actions", "condition", "input", "metadata", "throttle_period", "throttle_period_in_millis", "transform", "trigger", ), ) def put_watch( self, *, id: str, actions: t.Optional[t.Mapping[str, t.Mapping[str, t.Any]]] = None, active: t.Optional[bool] = None, condition: t.Optional[t.Mapping[str, t.Any]] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, if_primary_term: t.Optional[int] = None, if_seq_no: t.Optional[int] = None, input: t.Optional[t.Mapping[str, t.Any]] = None, metadata: t.Optional[t.Mapping[str, t.Any]] = None, pretty: t.Optional[bool] = None, throttle_period: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, throttle_period_in_millis: t.Optional[t.Any] = None, transform: t.Optional[t.Mapping[str, t.Any]] = None, trigger: t.Optional[t.Mapping[str, t.Any]] = None, version: t.Optional[int] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Create or update a watch. When a watch is registered, a new document that represents the watch is added to the .watches index and its trigger is immediately registered with the relevant trigger engine. Typically for the schedule trigger, the scheduler is the trigger engine.

IMPORTANT: You must use Kibana or this API to create a watch. Do not add a watch directly to the .watches index by using the Elasticsearch index API. If Elasticsearch security features are enabled, do not give users write privileges on the .watches index.

When you add a watch you can also define its initial active state by setting the active parameter.

When Elasticsearch security features are enabled, your watch can index or search only on indices for which the user that stored the watch has privileges. If the user is able to read index a, but not index b, the same will apply when the watch runs.
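A compact sketch of registering a watch that searches for errors every ten minutes and logs a message when any are found; the index pattern, query, and action name are all illustrative::

    client.watcher.put_watch(
        id="errors_watch",
        trigger={"schedule": {"interval": "10m"}},
        input={
            "search": {
                "request": {
                    "indices": ["logs-*"],
                    "body": {"query": {"match": {"level": "error"}}},
                }
            }
        },
        condition={"compare": {"ctx.payload.hits.total": {"gt": 0}}},
        actions={
            "log_errors": {
                "logging": {"text": "Found {{ctx.payload.hits.total}} errors"}
            }
        },
    )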

``_ :param id: The identifier for the watch. :param actions: The list of actions that will be run if the condition matches. :param active: The initial state of the watch. The default value is `true`, which means the watch is active by default. :param condition: The condition that defines if the actions should be run. :param if_primary_term: only update the watch if the last operation that has changed the watch has the specified primary term :param if_seq_no: only update the watch if the last operation that has changed the watch has the specified sequence number :param input: The input that defines the input that loads the data for the watch. :param metadata: Metadata JSON that will be copied into the history entries. :param throttle_period: The minimum time between actions being run. The default is 5 seconds. This default can be changed in the config file with the setting `xpack.watcher.throttle.period.default_period`. If both this value and the `throttle_period_in_millis` parameter are specified, Watcher uses the last parameter included in the request. :param throttle_period_in_millis: Minimum time in milliseconds between actions being run. Defaults to 5000. If both this value and the throttle_period parameter are specified, Watcher uses the last parameter included in the request. :param transform: The transform that processes the watch payload to prepare it for the watch actions. :param trigger: The trigger that defines when the watch should run. :param version: Explicit version number for concurrency control """ if id in SKIP_IN_PATH: raise ValueError("Empty value passed for parameter 'id'") __path_parts: t.Dict[str, str] = {"id": _quote(id)} __path = f'/_watcher/watch/{__path_parts["id"]}' __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if active is not None: __query["active"] = active if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if if_primary_term is not None: __query["if_primary_term"] = if_primary_term if if_seq_no is not None: __query["if_seq_no"] = if_seq_no if pretty is not None: __query["pretty"] = pretty if version is not None: __query["version"] = version if not __body: if actions is not None: __body["actions"] = actions if condition is not None: __body["condition"] = condition if input is not None: __body["input"] = input if metadata is not None: __body["metadata"] = metadata if throttle_period is not None: __body["throttle_period"] = throttle_period if throttle_period_in_millis is not None: __body["throttle_period_in_millis"] = throttle_period_in_millis if transform is not None: __body["transform"] = transform if trigger is not None: __body["trigger"] = trigger if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.put_watch", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("from_", "query", "search_after", "size", "sort"), parameter_aliases={"from": "from_"}, ) def query_watches( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, from_: t.Optional[int] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, query: t.Optional[t.Mapping[str, t.Any]] = None, 
search_after: t.Optional[ t.Sequence[t.Union[None, bool, float, int, str]] ] = None, size: t.Optional[int] = None, sort: t.Optional[ t.Union[ t.Sequence[t.Union[str, t.Mapping[str, t.Any]]], t.Union[str, t.Mapping[str, t.Any]], ] ] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Query watches. Get all registered watches in a paginated manner and optionally filter watches by a query.

Note that only the _id and metadata.* fields are queryable or sortable.
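For example, fetching the first ten watches sorted by identifier (a sketch)::

    resp = client.watcher.query_watches(
        from_=0,
        size=10,
        sort=[{"_id": {"order": "asc"}}],
    )
    for hit in resp["watches"]:
        print(hit["_id"])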

``_ :param from_: The offset from the first result to fetch. It must be non-negative. :param query: A query that filters the watches to be returned. :param search_after: Retrieve the next page of hits using a set of sort values from the previous page. :param size: The number of hits to return. It must be non-negative. :param sort: One or more fields used to sort the search results. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_query/watches" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} # The 'sort' parameter with a colon can't be encoded to the body. if sort is not None and ( (isinstance(sort, str) and ":" in sort) or ( isinstance(sort, (list, tuple)) and all(isinstance(_x, str) for _x in sort) and any(":" in _x for _x in sort) ) ): __query["sort"] = sort sort = None if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty if not __body: if from_ is not None: __body["from"] = from_ if query is not None: __body["query"] = query if search_after is not None: __body["search_after"] = search_after if size is not None: __body["size"] = size if sort is not None: __body["sort"] = sort if not __body: __body = None # type: ignore[assignment] __headers = {"accept": "application/json"} if __body is not None: __headers["content-type"] = "application/json" return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.query_watches", path_parts=__path_parts, ) @_rewrite_parameters() def start( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Start the watch service. Start the Watcher service if it is not already running.

``_ :param master_timeout: Period to wait for a connection to the master node. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_start" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="watcher.start", path_parts=__path_parts, ) @_rewrite_parameters() def stats( self, *, metric: t.Optional[ t.Union[ t.Sequence[ t.Union[ str, t.Literal[ "_all", "current_watches", "pending_watches", "queued_watches", ], ] ], t.Union[ str, t.Literal[ "_all", "current_watches", "pending_watches", "queued_watches" ], ], ] ] = None, emit_stacktraces: t.Optional[bool] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get Watcher statistics. This API always returns basic metrics. You can retrieve more metrics by using the metric parameter.
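For example, requesting all of the optional metrics::

    resp = client.watcher.stats(metric="_all", emit_stacktraces=False)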

``_ :param metric: Defines which additional metrics are included in the response. :param emit_stacktraces: Defines whether stack traces are generated for each watch that is running. """ __path_parts: t.Dict[str, str] if metric not in SKIP_IN_PATH: __path_parts = {"metric": _quote(metric)} __path = f'/_watcher/stats/{__path_parts["metric"]}' else: __path_parts = {} __path = "/_watcher/stats" __query: t.Dict[str, t.Any] = {} if emit_stacktraces is not None: __query["emit_stacktraces"] = emit_stacktraces if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="watcher.stats", path_parts=__path_parts, ) @_rewrite_parameters() def stop( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Stop the watch service. Stop the Watcher service if it is running.

``_ :param master_timeout: The period to wait for the master node. If the master node is not available before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/_stop" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "POST", __path, params=__query, headers=__headers, endpoint_id="watcher.stop", path_parts=__path_parts, ) @_rewrite_parameters( body_fields=("index_auto_expand_replicas", "index_number_of_replicas"), parameter_aliases={ "index.auto_expand_replicas": "index_auto_expand_replicas", "index.number_of_replicas": "index_number_of_replicas", }, ) def update_settings( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, index_auto_expand_replicas: t.Optional[str] = None, index_number_of_replicas: t.Optional[int] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, body: t.Optional[t.Dict[str, t.Any]] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Update Watcher index settings. Update settings for the Watcher internal index (.watches). Only a subset of settings can be modified. This includes index.auto_expand_replicas, index.number_of_replicas, index.routing.allocation.exclude.*, index.routing.allocation.include.* and index.routing.allocation.require.*. Modification of index.routing.allocation.include._tier_preference is an exception and is not allowed as the Watcher shards must always be in the data_content tier.
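For example, changing the replica count of the Watcher internal index (a sketch; choose a value appropriate for your cluster)::

    client.watcher.update_settings(index_number_of_replicas=1)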

``_ :param index_auto_expand_replicas: :param index_number_of_replicas: :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. :param timeout: The period to wait for a response. If no response is received before the timeout expires, the request fails and returns an error. """ __path_parts: t.Dict[str, str] = {} __path = "/_watcher/settings" __query: t.Dict[str, t.Any] = {} __body: t.Dict[str, t.Any] = body if body is not None else {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty if timeout is not None: __query["timeout"] = timeout if not __body: if index_auto_expand_replicas is not None: __body["index.auto_expand_replicas"] = index_auto_expand_replicas if index_number_of_replicas is not None: __body["index.number_of_replicas"] = index_number_of_replicas __headers = {"accept": "application/json", "content-type": "application/json"} return self.perform_request( # type: ignore[return-value] "PUT", __path, params=__query, headers=__headers, body=__body, endpoint_id="watcher.update_settings", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_sync/client/xpack.py000066400000000000000000000117661506101734100245470ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import typing as t from elastic_transport import ObjectApiResponse from ._base import NamespacedClient from .utils import _rewrite_parameters class XPackClient(NamespacedClient): def __getattr__(self, attr_name: str) -> t.Any: return getattr(self.client, attr_name) # AUTO-GENERATED-API-DEFINITIONS # @_rewrite_parameters() def info( self, *, accept_enterprise: t.Optional[bool] = None, categories: t.Optional[ t.Sequence[t.Union[str, t.Literal["build", "features", "license"]]] ] = None, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get information. The information provided by the API includes the following (a request example follows the list):

  • Build information including the build number and timestamp.
  • License information about the currently installed license.
  • Feature information for the features that are currently enabled and available under the current license.
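For example, restricting the response to build and license information (a sketch, assuming a connected client instance named ``client``)::

    resp = client.xpack.info(categories=["build", "license"])
    print(resp["license"]["status"])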
``_ :param accept_enterprise: If this param is used it must be set to true :param categories: A comma-separated list of the information categories to include in the response. For example, `build,license,features`. """ __path_parts: t.Dict[str, str] = {} __path = "/_xpack" __query: t.Dict[str, t.Any] = {} if accept_enterprise is not None: __query["accept_enterprise"] = accept_enterprise if categories is not None: __query["categories"] = categories if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="xpack.info", path_parts=__path_parts, ) @_rewrite_parameters() def usage( self, *, error_trace: t.Optional[bool] = None, filter_path: t.Optional[t.Union[str, t.Sequence[str]]] = None, human: t.Optional[bool] = None, master_timeout: t.Optional[t.Union[str, t.Literal[-1], t.Literal[0]]] = None, pretty: t.Optional[bool] = None, ) -> ObjectApiResponse[t.Any]: """ .. raw:: html

Get usage information. Get information about the features that are currently enabled and available under the current license. The API also provides some usage statistics.
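For example (the response reports, per feature, whether it is available and enabled; the ``watcher`` key shown here assumes Watcher is installed)::

    usage = client.xpack.usage()
    print(usage["watcher"]["available"], usage["watcher"]["enabled"])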

``_ :param master_timeout: The period to wait for a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. To indicate that the request should never timeout, set it to `-1`. """ __path_parts: t.Dict[str, str] = {} __path = "/_xpack/usage" __query: t.Dict[str, t.Any] = {} if error_trace is not None: __query["error_trace"] = error_trace if filter_path is not None: __query["filter_path"] = filter_path if human is not None: __query["human"] = human if master_timeout is not None: __query["master_timeout"] = master_timeout if pretty is not None: __query["pretty"] = pretty __headers = {"accept": "application/json"} return self.perform_request( # type: ignore[return-value] "GET", __path, params=__query, headers=__headers, endpoint_id="xpack.usage", path_parts=__path_parts, ) python-elasticsearch-9.1.1/elasticsearch/_utils.py000066400000000000000000000026131506101734100223360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import re from typing import Any, Dict def fixup_module_metadata(module_name: str, namespace: Dict[str, Any]) -> None: # Yoinked from python-trio/outcome, thanks Nathaniel! License: MIT def fix_one(obj: Any) -> None: mod = getattr(obj, "__module__", None) if mod is not None and re.match(r"^elasticsearch[0-9]*\.", mod) is not None: obj.__module__ = module_name if isinstance(obj, type): for attr_value in obj.__dict__.values(): fix_one(attr_value) for objname in namespace["__all__"]: obj = namespace[objname] fix_one(obj) python-elasticsearch-9.1.1/elasticsearch/_version.py000066400000000000000000000014551506101734100226660ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. __versionstr__ = "9.1.1" python-elasticsearch-9.1.1/elasticsearch/client.py000066400000000000000000000125721506101734100223220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import warnings from ._sync.client import Elasticsearch as Elasticsearch # noqa: F401 from ._sync.client.async_search import ( # noqa: F401 AsyncSearchClient as AsyncSearchClient, ) from ._sync.client.autoscaling import ( # noqa: F401 AutoscalingClient as AutoscalingClient, ) from ._sync.client.cat import CatClient as CatClient # noqa: F401 from ._sync.client.ccr import CcrClient as CcrClient # noqa: F401 from ._sync.client.cluster import ClusterClient as ClusterClient # noqa: F401 from ._sync.client.connector import ConnectorClient as ConnectorClient # noqa: F401 from ._sync.client.dangling_indices import ( # noqa: F401 DanglingIndicesClient as DanglingIndicesClient, ) from ._sync.client.enrich import EnrichClient as EnrichClient # noqa: F401 from ._sync.client.eql import EqlClient as EqlClient # noqa: F401 from ._sync.client.esql import EsqlClient as EsqlClient # noqa: F401 from ._sync.client.features import FeaturesClient as FeaturesClient # noqa: F401 from ._sync.client.fleet import FleetClient as FleetClient # noqa: F401 from ._sync.client.graph import GraphClient as GraphClient # noqa: F401 from ._sync.client.ilm import IlmClient as IlmClient # noqa: F401 from ._sync.client.indices import IndicesClient as IndicesClient # noqa: F401 from ._sync.client.inference import InferenceClient as InferenceClient # noqa: F401 from ._sync.client.ingest import IngestClient as IngestClient # noqa: F401 from ._sync.client.license import LicenseClient as LicenseClient # noqa: F401 from ._sync.client.logstash import LogstashClient as LogstashClient # noqa: F401 from ._sync.client.migration import MigrationClient as MigrationClient # noqa: F401 from ._sync.client.ml import MlClient as MlClient # noqa: F401 from ._sync.client.monitoring import MonitoringClient as MonitoringClient # noqa: F401 from ._sync.client.nodes import NodesClient as NodesClient # noqa: F401 from ._sync.client.query_rules import QueryRulesClient as QueryRulesClient # noqa: F401 from ._sync.client.rollup import RollupClient as RollupClient # noqa: F401 from ._sync.client.search_application import ( # noqa: F401 SearchApplicationClient as SearchApplicationClient, ) from ._sync.client.searchable_snapshots import ( # noqa: F401 SearchableSnapshotsClient as SearchableSnapshotsClient, ) from ._sync.client.security import SecurityClient as SecurityClient # noqa: F401 from ._sync.client.shutdown import ShutdownClient as ShutdownClient # noqa: F401 from ._sync.client.simulate import SimulateClient as SimulateClient # noqa: F401 from ._sync.client.slm import SlmClient as SlmClient # noqa: F401 from ._sync.client.snapshot import SnapshotClient as SnapshotClient # noqa: F401 from ._sync.client.sql import SqlClient as SqlClient # noqa: F401 from ._sync.client.ssl import SslClient as SslClient # noqa: F401 from ._sync.client.synonyms import SynonymsClient as SynonymsClient # noqa: F401 from ._sync.client.tasks import TasksClient as TasksClient # noqa: F401 from ._sync.client.text_structure import 
( # noqa: F401 TextStructureClient as TextStructureClient, ) from ._sync.client.transform import TransformClient as TransformClient # noqa: F401 from ._sync.client.watcher import WatcherClient as WatcherClient # noqa: F401 from ._sync.client.xpack import XPackClient as XPackClient # noqa: F401 from ._utils import fixup_module_metadata # This file exists for backwards compatibility. # We can't remove it as we use it for the Sphinx docs which show the full page, and we'd # rather show `elasticsearch.client.FooClient` than `elasticsearch._sync.client.FooClient`. warnings.warn( "Importing from the 'elasticsearch.client' module is deprecated. " "Instead use 'elasticsearch' module for importing the client.", category=DeprecationWarning, stacklevel=2, ) __all__ = [ "AsyncSearchClient", "AutoscalingClient", "CatClient", "CcrClient", "ClusterClient", "ConnectorClient", "DanglingIndicesClient", "Elasticsearch", "EnrichClient", "EqlClient", "FeaturesClient", "FleetClient", "GraphClient", "IlmClient", "IndicesClient", "IngestClient", "LicenseClient", "LogstashClient", "MigrationClient", "MlClient", "MonitoringClient", "NodesClient", "RollupClient", "SearchApplicationClient", "SearchableSnapshotsClient", "SecurityClient", "ShutdownClient", "SimulateClient", "SlmClient", "SnapshotClient", "SqlClient", "SslClient", "TasksClient", "TextStructureClient", "TransformClient", "WatcherClient", "XPackClient", ] fixup_module_metadata(__name__, globals()) del fixup_module_metadata python-elasticsearch-9.1.1/elasticsearch/compat.py000066400000000000000000000054121506101734100223220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import inspect import os import sys from pathlib import Path from typing import Tuple, Type, Union string_types: Tuple[Type[str], Type[bytes]] = (str, bytes) DISABLE_WARN_STACKLEVEL_ENV_VAR = "DISABLE_WARN_STACKLEVEL" def to_str(x: Union[str, bytes], encoding: str = "ascii") -> str: if not isinstance(x, str): return x.decode(encoding) return x def to_bytes(x: Union[str, bytes], encoding: str = "ascii") -> bytes: if not isinstance(x, bytes): return x.encode(encoding) return x def warn_stacklevel() -> int: """Dynamically determine warning stacklevel for warnings based on the call stack""" if os.environ.get(DISABLE_WARN_STACKLEVEL_ENV_VAR) in ["1", "true", "True"]: return 0 try: # Grab the root module from the current module '__name__' module_name = __name__.partition(".")[0] module_path = Path(sys.modules[module_name].__file__) # type: ignore[arg-type] # If the module is a folder we're looking at # subdirectories, otherwise we're looking for # an exact match. 
module_is_folder = module_path.name == "__init__.py" if module_is_folder: module_path = module_path.parent # Look through frames until we find a file that # isn't a part of our module, then return that stacklevel. for level, frame in enumerate(inspect.stack()): # Garbage collecting frames frame_filename = Path(frame.filename) del frame if ( # If the module is a folder we look at subdirectory module_is_folder and module_path not in frame_filename.parents ) or ( # Otherwise we're looking for an exact match. not module_is_folder and module_path != frame_filename ): return level except KeyError: pass return 0 __all__ = [ "string_types", "to_str", "to_bytes", "warn_stacklevel", ] python-elasticsearch-9.1.1/elasticsearch/dsl/000077500000000000000000000000001506101734100212455ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/dsl/__init__.py000066400000000000000000000103241506101734100233560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from . import async_connections, connections from .aggs import A, Agg from .analysis import analyzer, char_filter, normalizer, token_filter, tokenizer from .document import AsyncDocument, Document from .document_base import E, InnerDoc, M, MetaField, mapped_field from .exceptions import ( ElasticsearchDslException, IllegalOperation, UnknownDslObject, ValidationException, ) from .faceted_search import ( AsyncFacetedSearch, DateHistogramFacet, Facet, FacetedResponse, FacetedSearch, HistogramFacet, NestedFacet, RangeFacet, TermsFacet, ) from .field import ( Binary, Boolean, Byte, Completion, ConstantKeyword, CustomField, Date, DateRange, DenseVector, Double, DoubleRange, Field, Float, FloatRange, GeoPoint, GeoShape, HalfFloat, Integer, IntegerRange, Ip, IpRange, Join, Keyword, Long, LongRange, Murmur3, Nested, Object, Percolator, Point, RangeField, RankFeature, RankFeatures, ScaledFloat, SearchAsYouType, Shape, Short, SparseVector, Text, TokenCount, construct_field, ) from .function import SF from .index import ( AsyncComposableIndexTemplate, AsyncIndex, AsyncIndexTemplate, ComposableIndexTemplate, Index, IndexTemplate, ) from .mapping import AsyncMapping, Mapping from .query import Q, Query from .response import AggResponse, Response, UpdateByQueryResponse from .search import ( AsyncEmptySearch, AsyncMultiSearch, AsyncSearch, EmptySearch, MultiSearch, Search, ) from .update_by_query import AsyncUpdateByQuery, UpdateByQuery from .utils import AttrDict, AttrList, DslBase from .wrappers import Range __all__ = [ "A", "Agg", "AggResponse", "AsyncComposableIndexTemplate", "AsyncDocument", "AsyncEmptySearch", "AsyncFacetedSearch", "AsyncIndex", "AsyncIndexTemplate", "AsyncMapping", "AsyncMultiSearch", "AsyncSearch", "AsyncUpdateByQuery", "AttrDict", "AttrList", "Binary", "Boolean", "Byte", "Completion", 
"ComposableIndexTemplate", "ConstantKeyword", "CustomField", "Date", "DateHistogramFacet", "DateRange", "DenseVector", "Document", "Double", "DoubleRange", "DslBase", "E", "ElasticsearchDslException", "EmptySearch", "Facet", "FacetedResponse", "FacetedSearch", "Field", "Float", "FloatRange", "GeoPoint", "GeoShape", "HalfFloat", "HistogramFacet", "IllegalOperation", "Index", "IndexTemplate", "InnerDoc", "Integer", "IntegerRange", "Ip", "IpRange", "Join", "Keyword", "Long", "LongRange", "M", "Mapping", "MetaField", "MultiSearch", "Murmur3", "Nested", "NestedFacet", "Object", "Percolator", "Point", "Q", "Query", "Range", "RangeFacet", "RangeField", "RankFeature", "RankFeatures", "Response", "SF", "ScaledFloat", "Search", "SearchAsYouType", "Shape", "Short", "SparseVector", "TermsFacet", "Text", "TokenCount", "UnknownDslObject", "UpdateByQuery", "UpdateByQueryResponse", "ValidationException", "analyzer", "async_connections", "char_filter", "connections", "construct_field", "mapped_field", "normalizer", "token_filter", "tokenizer", ] python-elasticsearch-9.1.1/elasticsearch/dsl/_async/000077500000000000000000000000001506101734100225215ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/dsl/_async/__init__.py000066400000000000000000000014231506101734100246320ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/elasticsearch/dsl/_async/document.py000066400000000000000000000556611506101734100247260ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import collections.abc from typing import ( TYPE_CHECKING, Any, AsyncIterable, AsyncIterator, Dict, List, Optional, Tuple, Union, cast, ) from typing_extensions import Self, dataclass_transform from elasticsearch.exceptions import NotFoundError, RequestError from elasticsearch.helpers import async_bulk from .._async.index import AsyncIndex from ..async_connections import get_connection from ..document_base import DocumentBase, DocumentMeta, mapped_field from ..exceptions import IllegalOperation from ..utils import DOC_META_FIELDS, META_FIELDS, AsyncUsingType, merge from .search import AsyncSearch if TYPE_CHECKING: from elasticsearch import AsyncElasticsearch from elasticsearch.esql.esql import ESQLBase class AsyncIndexMeta(DocumentMeta): _index: AsyncIndex # global flag to guard us from associating an Index with the base Document # class, only user defined subclasses should have an _index attr _document_initialized = False def __new__( cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] ) -> "AsyncIndexMeta": new_cls = super().__new__(cls, name, bases, attrs) if cls._document_initialized: index_opts = attrs.pop("Index", None) index = cls.construct_index(index_opts, bases) new_cls._index = index index.document(new_cls) cls._document_initialized = True return cast(AsyncIndexMeta, new_cls) @classmethod def construct_index( cls, opts: Dict[str, Any], bases: Tuple[type, ...] ) -> AsyncIndex: if opts is None: for b in bases: if hasattr(b, "_index"): return b._index # Set None as Index name so it will set _all while making the query return AsyncIndex(name=None) i = AsyncIndex( getattr(opts, "name", "*"), using=getattr(opts, "using", "default") ) i.settings(**getattr(opts, "settings", {})) i.aliases(**getattr(opts, "aliases", {})) for a in getattr(opts, "analyzers", ()): i.analyzer(a) return i @dataclass_transform(field_specifiers=(mapped_field,)) class AsyncDocument(DocumentBase, metaclass=AsyncIndexMeta): """ Model-like class for persisting documents in elasticsearch. """ if TYPE_CHECKING: _index: AsyncIndex @classmethod def _get_using(cls, using: Optional[AsyncUsingType] = None) -> AsyncUsingType: return using or cls._index._using @classmethod def _get_connection( cls, using: Optional[AsyncUsingType] = None ) -> "AsyncElasticsearch": return get_connection(cls._get_using(using)) @classmethod async def init( cls, index: Optional[str] = None, using: Optional[AsyncUsingType] = None ) -> None: """ Create the index and populate the mappings in elasticsearch. """ i = cls._index if index: i = i.clone(name=index) await i.save(using=using) @classmethod def search( cls, using: Optional[AsyncUsingType] = None, index: Optional[str] = None ) -> AsyncSearch[Self]: """ Create an :class:`~elasticsearch.dsl.Search` instance that will search over this ``Document``. """ return AsyncSearch( using=cls._get_using(using), index=cls._default_index(index), doc_type=[cls] ) @classmethod async def get( cls, id: str, using: Optional[AsyncUsingType] = None, index: Optional[str] = None, **kwargs: Any, ) -> Optional[Self]: """ Retrieve a single document from elasticsearch using its ``id``. :arg id: ``id`` of the document to be retrieved :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` Any additional keyword arguments will be passed to ``Elasticsearch.get`` unchanged. 
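For example, with a hypothetical ``Post`` subclass of ``AsyncDocument``::

            post = await Post.get(id="42")
            if post is None:
                ...  # no document with that id exists
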
""" es = cls._get_connection(using) doc = await es.get(index=cls._default_index(index), id=id, **kwargs) if not doc.get("found", False): return None return cls.from_es(doc) @classmethod async def exists( cls, id: str, using: Optional[AsyncUsingType] = None, index: Optional[str] = None, **kwargs: Any, ) -> bool: """ check if exists a single document from elasticsearch using its ``id``. :arg id: ``id`` of the document to check if exists :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` Any additional keyword arguments will be passed to ``Elasticsearch.exists`` unchanged. """ es = cls._get_connection(using) return bool(await es.exists(index=cls._default_index(index), id=id, **kwargs)) @classmethod async def mget( cls, docs: List[Dict[str, Any]], using: Optional[AsyncUsingType] = None, index: Optional[str] = None, raise_on_error: bool = True, missing: str = "none", **kwargs: Any, ) -> List[Optional[Self]]: r""" Retrieve multiple document by their ``id``\s. Returns a list of instances in the same order as requested. :arg docs: list of ``id``\s of the documents to be retrieved or a list of document specifications as per https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg missing: what to do when one of the documents requested is not found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise ``NotFoundError``) or ``'skip'`` (ignore the missing document). Any additional keyword arguments will be passed to ``Elasticsearch.mget`` unchanged. """ if missing not in ("raise", "skip", "none"): raise ValueError("'missing' must be 'raise', 'skip', or 'none'.") es = cls._get_connection(using) body = { "docs": [ doc if isinstance(doc, collections.abc.Mapping) else {"_id": doc} for doc in docs ] } results = await es.mget(index=cls._default_index(index), body=body, **kwargs) objs: List[Optional[Self]] = [] error_docs: List[Self] = [] missing_docs: List[Self] = [] for doc in results["docs"]: if doc.get("found"): if error_docs or missing_docs: # We're going to raise an exception anyway, so avoid an # expensive call to cls.from_es(). continue objs.append(cls.from_es(doc)) elif doc.get("error"): if raise_on_error: error_docs.append(doc) if missing == "none": objs.append(None) # The doc didn't cause an error, but the doc also wasn't found. elif missing == "raise": missing_docs.append(doc) elif missing == "none": objs.append(None) if error_docs: error_ids = [doc["_id"] for doc in error_docs] message = "Required routing not provided for documents %s." message %= ", ".join(error_ids) raise RequestError(400, message, error_docs) # type: ignore[arg-type] if missing_docs: missing_ids = [doc["_id"] for doc in missing_docs] message = f"Documents {', '.join(missing_ids)} not found." raise NotFoundError(404, message, {"docs": missing_docs}) # type: ignore[arg-type] return objs async def delete( self, using: Optional[AsyncUsingType] = None, index: Optional[str] = None, **kwargs: Any, ) -> None: """ Delete the instance in elasticsearch. :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` Any additional keyword arguments will be passed to ``Elasticsearch.delete`` unchanged. 
""" es = self._get_connection(using) # extract routing etc from meta doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} # Optimistic concurrency control if "seq_no" in self.meta and "primary_term" in self.meta: doc_meta["if_seq_no"] = self.meta["seq_no"] doc_meta["if_primary_term"] = self.meta["primary_term"] doc_meta.update(kwargs) i = self._get_index(index) assert i is not None await es.delete(index=i, **doc_meta) async def update( self, using: Optional[AsyncUsingType] = None, index: Optional[str] = None, detect_noop: bool = True, doc_as_upsert: bool = False, refresh: bool = False, retry_on_conflict: Optional[int] = None, script: Optional[Union[str, Dict[str, Any]]] = None, script_id: Optional[str] = None, scripted_upsert: bool = False, upsert: Optional[Dict[str, Any]] = None, return_doc_meta: bool = False, **fields: Any, ) -> Any: """ Partial update of the document, specify fields you wish to update and both the instance and the document in elasticsearch will be updated:: doc = MyDocument(title='Document Title!') doc.save() doc.update(title='New Document Title!') :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg detect_noop: Set to ``False`` to disable noop detection. :arg refresh: Control when the changes made by this request are visible to search. Set to ``True`` for immediate effect. :arg retry_on_conflict: In between the get and indexing phases of the update, it is possible that another process might have already updated the same document. By default, the update will fail with a version conflict exception. The retry_on_conflict parameter controls how many times to retry the update before finally throwing an exception. :arg doc_as_upsert: Instead of sending a partial doc plus an upsert doc, setting doc_as_upsert to true will use the contents of doc as the upsert value :arg script: the source code of the script as a string, or a dictionary with script attributes to update. :arg return_doc_meta: set to ``True`` to return all metadata from the index API call instead of only the operation result :return: operation result noop/updated """ body: Dict[str, Any] = { "doc_as_upsert": doc_as_upsert, "detect_noop": detect_noop, } # scripted update if script or script_id: if upsert is not None: body["upsert"] = upsert if script: if isinstance(script, str): script = {"source": script} else: script = {"id": script_id} if "params" not in script: script["params"] = fields else: script["params"].update(fields) body["script"] = script body["scripted_upsert"] = scripted_upsert # partial document update else: if not fields: raise IllegalOperation( "You cannot call update() without updating individual fields or a script. " "If you wish to update the entire object use save()." 
) # update given fields locally merge(self, fields) # prepare data for ES values = self.to_dict(skip_empty=False) # if fields were given: partial update body["doc"] = {k: values.get(k) for k in fields.keys()} # extract routing etc from meta doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} if retry_on_conflict is not None: doc_meta["retry_on_conflict"] = retry_on_conflict # Optimistic concurrency control if ( retry_on_conflict in (None, 0) and "seq_no" in self.meta and "primary_term" in self.meta ): doc_meta["if_seq_no"] = self.meta["seq_no"] doc_meta["if_primary_term"] = self.meta["primary_term"] i = self._get_index(index) assert i is not None meta = await self._get_connection(using).update( index=i, body=body, refresh=refresh, **doc_meta ) # update meta information from ES for k in META_FIELDS: if "_" + k in meta: setattr(self.meta, k, meta["_" + k]) return meta if return_doc_meta else meta["result"] async def save( self, using: Optional[AsyncUsingType] = None, index: Optional[str] = None, validate: bool = True, skip_empty: bool = True, return_doc_meta: bool = False, **kwargs: Any, ) -> Any: """ Save the document into elasticsearch. If the document doesn't exist it is created, otherwise it is overwritten. Returns ``True`` if this operation resulted in a new document being created. :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg validate: set to ``False`` to skip validating the document :arg skip_empty: if set to ``False`` will cause empty values (``None``, ``[]``, ``{}``) to be left on the document. Those values will be stripped out otherwise as they make no difference in elasticsearch. :arg return_doc_meta: set to ``True`` to return all metadata from the index API call instead of only the operation result Any additional keyword arguments will be passed to ``Elasticsearch.index`` unchanged. :return: operation result created/updated """ if validate: self.full_clean() es = self._get_connection(using) # extract routing etc from meta doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} # Optimistic concurrency control if "seq_no" in self.meta and "primary_term" in self.meta: doc_meta["if_seq_no"] = self.meta["seq_no"] doc_meta["if_primary_term"] = self.meta["primary_term"] doc_meta.update(kwargs) i = self._get_index(index) assert i is not None meta = await es.index( index=i, body=self.to_dict(skip_empty=skip_empty), **doc_meta, ) # update meta information from ES for k in META_FIELDS: if "_" + k in meta: setattr(self.meta, k, meta["_" + k]) return meta if return_doc_meta else meta["result"] @classmethod async def bulk( cls, actions: AsyncIterable[Union[Self, Dict[str, Any]]], using: Optional[AsyncUsingType] = None, index: Optional[str] = None, validate: bool = True, skip_empty: bool = True, **kwargs: Any, ) -> Tuple[int, Union[int, List[Any]]]: """ Perform multiple indexing operations in a single request. :arg actions: a generator that returns document instances to be indexed or bulk operation dictionaries. :arg using: connection alias to use, defaults to ``'default'`` :arg index: Elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg validate: set to ``False`` to skip validating the documents :arg skip_empty: if set to ``False`` will cause empty values (``None``, ``[]``, ``{}``) to be left on the document.
Those values will be stripped out otherwise as they make no difference in Elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.bulk`` unchanged. :return: bulk operation results """ es = cls._get_connection(using) i = cls._default_index(index) assert i is not None class Generate: def __init__( self, doc_iterator: AsyncIterable[Union[AsyncDocument, Dict[str, Any]]], ): self.doc_iterator = doc_iterator.__aiter__() def __aiter__(self) -> Self: return self async def __anext__(self) -> Dict[str, Any]: doc: Optional[Union[AsyncDocument, Dict[str, Any]]] = ( await self.doc_iterator.__anext__() ) if isinstance(doc, dict): action = doc doc = None if "_source" in action and isinstance( action["_source"], AsyncDocument ): doc = action["_source"] if validate: # pragma: no cover doc.full_clean() action["_source"] = doc.to_dict( include_meta=False, skip_empty=skip_empty ) elif doc is not None: if validate: # pragma: no cover doc.full_clean() action = doc.to_dict(include_meta=True, skip_empty=skip_empty) if "_index" not in action: action["_index"] = i return action return await async_bulk(es, Generate(actions), **kwargs) @classmethod async def esql_execute( cls, query: "ESQLBase", return_additional: bool = False, ignore_missing_fields: bool = False, using: Optional[AsyncUsingType] = None, **kwargs: Any, ) -> AsyncIterator[Union[Self, Tuple[Self, Dict[str, Any]]]]: """ Execute the given ES|QL query and return an iterator of the resulting documents or, when ``return_additional`` is set, of 2-element tuples, where the first element is an instance of this ``Document`` and the second a dictionary with any remaining columns requested in the query. :arg query: an ES|QL query object created with the ``esql_from()`` method. :arg return_additional: if ``False`` (the default), this method returns document objects. If set to ``True``, the method returns tuples with a document in the first element and a dictionary with any additional columns returned by the query in the second element. :arg ignore_missing_fields: if ``False`` (the default), all the fields of the document must be present in the query, or else an exception is raised. Set to ``True`` to allow missing fields, which will result in partially initialized document objects. :arg using: connection alias to use, defaults to ``'default'`` :arg kwargs: additional options for the ``client.esql.query()`` function. """ es = cls._get_connection(using) response = await es.esql.query(query=str(query), **kwargs) query_columns = [col["name"] for col in response.body.get("columns", [])] # Here we get the list of columns defined in the document, which are the # columns that we will take from each result to assemble the document # object. # When `for_esql=False` is passed below by default, the list will include # nested fields, which ES|QL does not return, causing an error. When passing # `ignore_missing_fields=True` the list will be generated with # `for_esql=True`, so the error will not occur, but the documents will # not have any Nested objects in them. doc_fields = set(cls._get_field_names(for_esql=ignore_missing_fields)) if not ignore_missing_fields and not doc_fields.issubset(set(query_columns)): raise ValueError( f"Not all fields of {cls.__name__} were returned by the query. " "Make sure your document does not use Nested fields, which are " "currently not supported in ES|QL. To force the query to be " "evaluated in spite of the missing fields, set the " "ignore_missing_fields=True option in the esql_execute() call."
) non_doc_fields: set[str] = set(query_columns) - doc_fields - {"_id"} index_id = query_columns.index("_id") results = response.body.get("values", []) for column_values in results: # create a dictionary with all the document fields, expanding the # dot notation returned by ES|QL into the recursive dictionaries # used by Document.from_dict() doc_dict: Dict[str, Any] = {} for col, val in zip(query_columns, column_values): if col in doc_fields: cols = col.split(".") d = doc_dict for c in cols[:-1]: if c not in d: d[c] = {} d = d[c] d[cols[-1]] = val # create the document instance obj = cls(meta={"_id": column_values[index_id]}) obj._from_dict(doc_dict) if return_additional: # build a dict with any other values included in the response other = { col: val for col, val in zip(query_columns, column_values) if col in non_doc_fields } yield obj, other else: yield obj python-elasticsearch-9.1.1/elasticsearch/dsl/_async/faceted_search.py000066400000000000000000000033171506101734100260170ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import TYPE_CHECKING from ..faceted_search_base import FacetedResponse, FacetedSearchBase from ..utils import _R from .search import AsyncSearch if TYPE_CHECKING: from ..response import Response class AsyncFacetedSearch(FacetedSearchBase[_R]): _s: AsyncSearch[_R] async def count(self) -> int: return await self._s.count() def search(self) -> AsyncSearch[_R]: """ Returns the base Search object to which the facets are added. You can customize the query by overriding this method and returning a modified search object. """ s = AsyncSearch[_R](doc_type=self.doc_types, index=self.index, using=self.using) return s.response_class(FacetedResponse) async def execute(self) -> "Response[_R]": """ Execute the search and return the response. """ r = await self._s.execute() r._faceted_search = self return r python-elasticsearch-9.1.1/elasticsearch/dsl/_async/index.py000066400000000000000000000543551506101734100242160ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
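# Usage sketch (illustrative only; the "blog" index name is hypothetical):
#
#     blog = AsyncIndex("blog")
#     blog.settings(number_of_shards=1, number_of_replicas=0)
#     await blog.create()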
from typing import TYPE_CHECKING, Any, Dict, Optional from typing_extensions import Self from ..async_connections import get_connection from ..exceptions import IllegalOperation from ..index_base import IndexBase from ..utils import AsyncUsingType from .mapping import AsyncMapping from .search import AsyncSearch from .update_by_query import AsyncUpdateByQuery if TYPE_CHECKING: from elastic_transport import ObjectApiResponse from elasticsearch import AsyncElasticsearch class AsyncIndexTemplate: def __init__( self, name: str, template: str, index: Optional["AsyncIndex"] = None, order: Optional[int] = None, **kwargs: Any, ): if index is None: self._index = AsyncIndex(template, **kwargs) else: if kwargs: raise ValueError( "You cannot specify options for Index when" " passing an Index instance." ) self._index = index.clone() self._index._name = template self._template_name = name self.order = order def __getattr__(self, attr_name: str) -> Any: return getattr(self._index, attr_name) def to_dict(self) -> Dict[str, Any]: d = self._index.to_dict() d["index_patterns"] = [self._index._name] if self.order is not None: d["order"] = self.order return d async def save( self, using: Optional[AsyncUsingType] = None ) -> "ObjectApiResponse[Any]": es = get_connection(using or self._index._using) return await es.indices.put_template( name=self._template_name, body=self.to_dict() ) class AsyncComposableIndexTemplate: def __init__( self, name: str, template: str, index: Optional["AsyncIndex"] = None, priority: Optional[int] = None, **kwargs: Any, ): if index is None: self._index = AsyncIndex(template, **kwargs) else: if kwargs: raise ValueError( "You cannot specify options for Index when" " passing an Index instance." ) self._index = index.clone() self._index._name = template self._template_name = name self.priority = priority def __getattr__(self, attr_name: str) -> Any: return getattr(self._index, attr_name) def to_dict(self) -> Dict[str, Any]: d: Dict[str, Any] = {"template": self._index.to_dict()} d["index_patterns"] = [self._index._name] if self.priority is not None: d["priority"] = self.priority return d async def save( self, using: Optional[AsyncUsingType] = None ) -> "ObjectApiResponse[Any]": es = get_connection(using or self._index._using) return await es.indices.put_index_template( name=self._template_name, **self.to_dict() ) class AsyncIndex(IndexBase): _using: AsyncUsingType if TYPE_CHECKING: def get_or_create_mapping(self) -> AsyncMapping: ... 
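# NOTE: the TYPE_CHECKING stub above only narrows the return type of
# get_or_create_mapping(), whose concrete implementation is inherited
# from IndexBase; it has no effect at runtime.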
def __init__(self, name: str, using: AsyncUsingType = "default"): """ :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ super().__init__(name, AsyncMapping, using=using) def _get_connection( self, using: Optional[AsyncUsingType] = None ) -> "AsyncElasticsearch": if self._name is None: raise ValueError("You cannot perform API calls on the default index.") return get_connection(using or self._using) connection = property(_get_connection) def as_template( self, template_name: str, pattern: Optional[str] = None, order: Optional[int] = None, ) -> AsyncIndexTemplate: return AsyncIndexTemplate( template_name, pattern or self._name, index=self, order=order ) def as_composable_template( self, template_name: str, pattern: Optional[str] = None, priority: Optional[int] = None, ) -> AsyncComposableIndexTemplate: return AsyncComposableIndexTemplate( template_name, pattern or self._name, index=self, priority=priority ) async def load_mappings(self, using: Optional[AsyncUsingType] = None) -> None: await self.get_or_create_mapping().update_from_es( self._name, using=using or self._using ) def clone( self, name: Optional[str] = None, using: Optional[AsyncUsingType] = None ) -> Self: """ Create a copy of the instance with another name or connection alias. Useful for creating multiple indices with shared configuration:: i = Index('base-index') i.settings(number_of_shards=1) i.create() i2 = i.clone('other-index') i2.create() :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ i = self.__class__(name or self._name, using=using or self._using) i._settings = self._settings.copy() i._aliases = self._aliases.copy() i._analysis = self._analysis.copy() i._doc_types = self._doc_types[:] if self._mapping is not None: i._mapping = self._mapping._clone() return i def search(self, using: Optional[AsyncUsingType] = None) -> AsyncSearch: """ Return a :class:`~elasticsearch.dsl.Search` object searching over the index (or all the indices belonging to this template) and its ``Document``\\s. """ return AsyncSearch( using=using or self._using, index=self._name, doc_type=self._doc_types ) def updateByQuery( self, using: Optional[AsyncUsingType] = None ) -> AsyncUpdateByQuery: """ Return a :class:`~elasticsearch.dsl.UpdateByQuery` object searching over the index (or all the indices belonging to this template) and updating Documents that match the search criteria. For more information, see here: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html """ return AsyncUpdateByQuery( using=using or self._using, index=self._name, ) async def create( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Creates the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.create`` unchanged. """ return await self._get_connection(using).indices.create( index=self._name, body=self.to_dict(), **kwargs ) async def is_closed(self, using: Optional[AsyncUsingType] = None) -> bool: state = await self._get_connection(using).cluster.state( index=self._name, metric="metadata" ) return bool(state["metadata"]["indices"][self._name]["state"] == "close") async def save( self, using: Optional[AsyncUsingType] = None ) -> "Optional[ObjectApiResponse[Any]]": """ Sync the index definition with elasticsearch, creating the index if it doesn't exist and updating its settings and mappings if it does. 
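For example (an illustrative sketch)::

            blog = AsyncIndex("blog")
            blog.settings(number_of_shards=1)
            await blog.save()  # create "blog", or sync settings/mappings if it exists
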
Note some settings and mapping changes cannot be done on an open index (or at all on an existing index) and for those this method will fail with the underlying exception. """ if not await self.exists(using=using): return await self.create(using=using) body = self.to_dict() settings = body.pop("settings", {}) analysis = settings.pop("analysis", None) current_settings = (await self.get_settings(using=using))[self._name][ "settings" ]["index"] if analysis: if await self.is_closed(using=using): # closed index, update away settings["analysis"] = analysis else: # compare analysis definition, if all analysis objects are # already defined as requested, skip analysis update and # proceed, otherwise raise IllegalOperation existing_analysis = current_settings.get("analysis", {}) if any( existing_analysis.get(section, {}).get(k, None) != analysis[section][k] for section in analysis for k in analysis[section] ): raise IllegalOperation( "You cannot update analysis configuration on an open index, " "you need to close index %s first." % self._name ) # try and update the settings if settings: settings = settings.copy() for k, v in list(settings.items()): if k in current_settings and current_settings[k] == str(v): del settings[k] if settings: await self.put_settings(using=using, body=settings) # update the mappings, any conflict in the mappings will result in an # exception mappings = body.pop("mappings", {}) if mappings: return await self.put_mapping(using=using, body=mappings) return None async def analyze( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Perform the analysis process on a text and return the tokens breakdown of the text. Any additional keyword arguments will be passed to ``Elasticsearch.indices.analyze`` unchanged. """ return await self._get_connection(using).indices.analyze( index=self._name, **kwargs ) async def refresh( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Performs a refresh operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.refresh`` unchanged. """ return await self._get_connection(using).indices.refresh( index=self._name, **kwargs ) async def flush( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Performs a flush operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.flush`` unchanged. """ return await self._get_connection(using).indices.flush( index=self._name, **kwargs ) async def get( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The get index API allows to retrieve information about the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get`` unchanged. """ return await self._get_connection(using).indices.get(index=self._name, **kwargs) async def open( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Opens the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.open`` unchanged. """ return await self._get_connection(using).indices.open( index=self._name, **kwargs ) async def close( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Closes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.close`` unchanged. 
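For example, closing and later reopening an index (illustrative)::

            await blog.close()
            ...
            await blog.open()
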
""" return await self._get_connection(using).indices.close( index=self._name, **kwargs ) async def delete( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Deletes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete`` unchanged. """ return await self._get_connection(using).indices.delete( index=self._name, **kwargs ) async def exists( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> bool: """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return bool( await self._get_connection(using).indices.exists(index=self._name, **kwargs) ) async def put_mapping( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Register specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_mapping`` unchanged. """ return await self._get_connection(using).indices.put_mapping( index=self._name, **kwargs ) async def get_mapping( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_mapping`` unchanged. """ return await self._get_connection(using).indices.get_mapping( index=self._name, **kwargs ) async def get_field_mapping( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve mapping definition of a specific field. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_field_mapping`` unchanged. """ return await self._get_connection(using).indices.get_field_mapping( index=self._name, **kwargs ) async def put_alias( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Create an alias for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_alias`` unchanged. """ return await self._get_connection(using).indices.put_alias( index=self._name, **kwargs ) async def exists_alias( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> bool: """ Return a boolean indicating whether given alias exists for this index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists_alias`` unchanged. """ return bool( await self._get_connection(using).indices.exists_alias( index=self._name, **kwargs ) ) async def get_alias( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve a specified alias. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_alias`` unchanged. """ return await self._get_connection(using).indices.get_alias( index=self._name, **kwargs ) async def delete_alias( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Delete specific alias. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete_alias`` unchanged. """ return await self._get_connection(using).indices.delete_alias( index=self._name, **kwargs ) async def get_settings( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve settings for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_settings`` unchanged. 
""" return await self._get_connection(using).indices.get_settings( index=self._name, **kwargs ) async def put_settings( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Change specific index level settings in real time. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_settings`` unchanged. """ return await self._get_connection(using).indices.put_settings( index=self._name, **kwargs ) async def stats( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve statistics on different operations happening on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.stats`` unchanged. """ return await self._get_connection(using).indices.stats( index=self._name, **kwargs ) async def segments( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Provide low level segments information that a Lucene index (shard level) is built with. Any additional keyword arguments will be passed to ``Elasticsearch.indices.segments`` unchanged. """ return await self._get_connection(using).indices.segments( index=self._name, **kwargs ) async def validate_query( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Validate a potentially expensive query without executing it. Any additional keyword arguments will be passed to ``Elasticsearch.indices.validate_query`` unchanged. """ return await self._get_connection(using).indices.validate_query( index=self._name, **kwargs ) async def clear_cache( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Clear all caches or specific cached associated with the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.clear_cache`` unchanged. """ return await self._get_connection(using).indices.clear_cache( index=self._name, **kwargs ) async def recovery( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The indices recovery API provides insight into on-going shard recoveries for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.recovery`` unchanged. """ return await self._get_connection(using).indices.recovery( index=self._name, **kwargs ) async def shard_stores( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Provides store information for shard copies of the index. Store information reports on which nodes shard copies exist, the shard copy version, indicating how recent they are, and any exceptions encountered while opening the shard index or from earlier engine failure. Any additional keyword arguments will be passed to ``Elasticsearch.indices.shard_stores`` unchanged. """ return await self._get_connection(using).indices.shard_stores( index=self._name, **kwargs ) async def forcemerge( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The force merge API allows to force merging of the index through an API. The merge relates to the number of segments a Lucene index holds within each shard. The force merge operation allows to reduce the number of segments by merging them. This call will block until the merge is complete. If the http connection is lost, the request will continue in the background, and any new requests will block until the previous force merge is complete. 
Any additional keyword arguments will be passed to ``Elasticsearch.indices.forcemerge`` unchanged. """ return await self._get_connection(using).indices.forcemerge( index=self._name, **kwargs ) async def shrink( self, using: Optional[AsyncUsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The number of primary shards in the target index must be a factor of the shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. Any additional keyword arguments will be passed to ``Elasticsearch.indices.shrink`` unchanged. """ return await self._get_connection(using).indices.shrink( index=self._name, **kwargs ) python-elasticsearch-9.1.1/elasticsearch/dsl/_async/mapping.py000066400000000000000000000033371506101734100245340ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import List, Optional, Union from typing_extensions import Self from ..async_connections import get_connection from ..mapping_base import MappingBase from ..utils import AsyncUsingType class AsyncMapping(MappingBase): @classmethod async def from_es( cls, index: Optional[Union[str, List[str]]], using: AsyncUsingType = "default" ) -> Self: m = cls() await m.update_from_es(index, using) return m async def update_from_es( self, index: Optional[Union[str, List[str]]], using: AsyncUsingType = "default" ) -> None: es = get_connection(using) raw = await es.indices.get_mapping(index=index) _, raw = raw.popitem() self._update_from_dict(raw["mappings"]) async def save(self, index: str, using: AsyncUsingType = "default") -> None: from .index import AsyncIndex i = AsyncIndex(index, using=using) i.mapping(self) await i.save() python-elasticsearch-9.1.1/elasticsearch/dsl/_async/search.py000066400000000000000000000200431506101734100243370ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import contextlib from typing import ( TYPE_CHECKING, Any, AsyncIterator, Dict, Iterator, List, Optional, cast, ) from typing_extensions import Self from elasticsearch.exceptions import ApiError from elasticsearch.helpers import async_scan from ..async_connections import get_connection from ..response import Response from ..search_base import MultiSearchBase, SearchBase from ..utils import _R, AsyncUsingType, AttrDict class AsyncSearch(SearchBase[_R]): _using: AsyncUsingType def __aiter__(self) -> AsyncIterator[_R]: """ Iterate over the hits. """ class ResultsIterator(AsyncIterator[_R]): def __init__(self, search: AsyncSearch[_R]): self.search = search self.iterator: Optional[Iterator[_R]] = None async def __anext__(self) -> _R: if self.iterator is None: self.iterator = iter(await self.search.execute()) try: return next(self.iterator) except StopIteration: raise StopAsyncIteration() return ResultsIterator(self) async def count(self) -> int: """ Return the number of hits matching the query and filters. Note that only the actual number is returned. """ if hasattr(self, "_response") and self._response.hits.total.relation == "eq": # type: ignore[attr-defined] return cast(int, self._response.hits.total.value) # type: ignore[attr-defined] es = get_connection(self._using) d = self.to_dict(count=True) # TODO: failed shards detection resp = await es.count( index=self._index, query=cast(Optional[Dict[str, Any]], d.get("query", None)), **self._params, ) return cast(int, resp["count"]) async def execute(self, ignore_cache: bool = False) -> Response[_R]: """ Execute the search and return an instance of ``Response`` wrapping all the data. :arg ignore_cache: if set to ``True``, consecutive calls will hit ES, while cached result will be ignored. Defaults to `False` """ if ignore_cache or not hasattr(self, "_response"): es = get_connection(self._using) self._response = self._response_class( self, ( await es.search( index=self._index, body=self.to_dict(), **self._params ) ).body, ) return self._response async def scan(self) -> AsyncIterator[_R]: """ Turn the search into a scan search and return a generator that will iterate over all the documents matching the query. Use the ``params`` method to specify any additional arguments you wish to pass to the underlying ``scan`` helper from ``elasticsearch-py`` - https://elasticsearch-py.readthedocs.io/en/latest/helpers.html#scan The ``iterate()`` method should be preferred, as it provides similar functionality using an Elasticsearch point in time. """ es = get_connection(self._using) async for hit in async_scan( es, query=self.to_dict(), index=self._index, **self._params ): yield self._get_result(cast(AttrDict[Any], hit)) async def delete(self) -> AttrDict[Any]: """ ``delete()`` executes the query by delegating to ``delete_by_query()``. 
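For example, deleting every document that matches a query (illustrative; the index and field names are hypothetical)::

            s = AsyncSearch(index="posts").query("match", state="stale")
            resp = await s.delete()
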
Use the ``params`` method to specify any additional arguments you wish to pass to the underlying ``delete_by_query`` helper from ``elasticsearch-py`` - https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query """ es = get_connection(self._using) assert self._index is not None return AttrDict( cast( Dict[str, Any], await es.delete_by_query( index=self._index, body=self.to_dict(), **self._params ), ) ) @contextlib.asynccontextmanager async def point_in_time(self, keep_alive: str = "1m") -> AsyncIterator[Self]: """ Open a point in time (pit) that can be used across several searches. This method implements a context manager that returns a search object configured to operate within the created pit. :arg keep_alive: the time to live for the point in time, renewed with each search request """ es = get_connection(self._using) pit = await es.open_point_in_time( index=self._index or "*", keep_alive=keep_alive ) search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive}) if not search._sort: search = search.sort("_shard_doc") yield search await es.close_point_in_time(id=pit["id"]) async def iterate(self, keep_alive: str = "1m") -> AsyncIterator[_R]: """ Return a generator that iterates over all the documents matching the query. This method uses a point in time to provide consistent results even when the index is changing. It should be preferred over ``scan()``. :arg keep_alive: the time to live for the point in time, renewed with each new search request """ async with self.point_in_time(keep_alive=keep_alive) as s: while True: r = await s.execute() for hit in r: yield hit if len(r.hits) == 0: break s = s.search_after() class AsyncMultiSearch(MultiSearchBase[_R]): """ Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single request. """ _using: AsyncUsingType if TYPE_CHECKING: def add(self, search: AsyncSearch[_R]) -> Self: ... # type: ignore[override] async def execute( self, ignore_cache: bool = False, raise_on_error: bool = True ) -> List[Response[_R]]: """ Execute the multi search request and return a list of search results. """ if ignore_cache or not hasattr(self, "_response"): es = get_connection(self._using) responses = await es.msearch( index=self._index, body=self.to_dict(), **self._params ) out: List[Response[_R]] = [] for s, r in zip(self._searches, responses["responses"]): if r.get("error", False): if raise_on_error: raise ApiError("N/A", meta=responses.meta, body=r) r = None else: r = Response(s, r) out.append(r) self._response = out return self._response class AsyncEmptySearch(AsyncSearch[_R]): async def count(self) -> int: return 0 async def execute(self, ignore_cache: bool = False) -> Response[_R]: return self._response_class(self, {"hits": {"total": 0, "hits": []}}) async def scan(self) -> AsyncIterator[_R]: return yield # a bit strange, but this forces an empty generator function async def delete(self) -> AttrDict[Any]: return AttrDict[Any]({}) python-elasticsearch-9.1.1/elasticsearch/dsl/_async/update_by_query.py000066400000000000000000000031511506101734100262740ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import TYPE_CHECKING from ..async_connections import get_connection from ..update_by_query_base import UpdateByQueryBase from ..utils import _R, AsyncUsingType if TYPE_CHECKING: from ..response import UpdateByQueryResponse class AsyncUpdateByQuery(UpdateByQueryBase[_R]): _using: AsyncUsingType async def execute(self) -> "UpdateByQueryResponse[_R]": """ Execute the search and return an instance of ``Response`` wrapping all the data. """ es = get_connection(self._using) assert self._index is not None self._response = self._response_class( self, ( await es.update_by_query( index=self._index, **self.to_dict(), **self._params ) ).body, ) return self._response python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/000077500000000000000000000000001506101734100223605ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/__init__.py000066400000000000000000000014231506101734100244710ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/document.py000066400000000000000000000550041506101734100245540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
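# Usage sketch (illustrative only; ``Article`` and its ``title`` field are
# hypothetical). This module is the blocking counterpart of
# elasticsearch/dsl/_async/document.py:
#
#     from elasticsearch.dsl import Document, Text
#
#     class Article(Document):
#         title = Text()
#
#         class Index:
#             name = "articles"
#
#     Article.init()                       # create the index and mappings
#     Article(title="Hello").save()        # index a document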
import collections.abc from typing import ( TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union, cast, ) from typing_extensions import Self, dataclass_transform from elasticsearch.exceptions import NotFoundError, RequestError from elasticsearch.helpers import bulk from .._sync.index import Index from ..connections import get_connection from ..document_base import DocumentBase, DocumentMeta, mapped_field from ..exceptions import IllegalOperation from ..utils import DOC_META_FIELDS, META_FIELDS, UsingType, merge from .search import Search if TYPE_CHECKING: from elasticsearch import Elasticsearch from elasticsearch.esql.esql import ESQLBase class IndexMeta(DocumentMeta): _index: Index # global flag to guard us from associating an Index with the base Document # class, only user defined subclasses should have an _index attr _document_initialized = False def __new__( cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] ) -> "IndexMeta": new_cls = super().__new__(cls, name, bases, attrs) if cls._document_initialized: index_opts = attrs.pop("Index", None) index = cls.construct_index(index_opts, bases) new_cls._index = index index.document(new_cls) cls._document_initialized = True return cast(IndexMeta, new_cls) @classmethod def construct_index(cls, opts: Dict[str, Any], bases: Tuple[type, ...]) -> Index: if opts is None: for b in bases: if hasattr(b, "_index"): return b._index # Set None as Index name so it will set _all while making the query return Index(name=None) i = Index(getattr(opts, "name", "*"), using=getattr(opts, "using", "default")) i.settings(**getattr(opts, "settings", {})) i.aliases(**getattr(opts, "aliases", {})) for a in getattr(opts, "analyzers", ()): i.analyzer(a) return i @dataclass_transform(field_specifiers=(mapped_field,)) class Document(DocumentBase, metaclass=IndexMeta): """ Model-like class for persisting documents in elasticsearch. """ if TYPE_CHECKING: _index: Index @classmethod def _get_using(cls, using: Optional[UsingType] = None) -> UsingType: return using or cls._index._using @classmethod def _get_connection(cls, using: Optional[UsingType] = None) -> "Elasticsearch": return get_connection(cls._get_using(using)) @classmethod def init( cls, index: Optional[str] = None, using: Optional[UsingType] = None ) -> None: """ Create the index and populate the mappings in elasticsearch. """ i = cls._index if index: i = i.clone(name=index) i.save(using=using) @classmethod def search( cls, using: Optional[UsingType] = None, index: Optional[str] = None ) -> Search[Self]: """ Create an :class:`~elasticsearch.dsl.Search` instance that will search over this ``Document``. """ return Search( using=cls._get_using(using), index=cls._default_index(index), doc_type=[cls] ) @classmethod def get( cls, id: str, using: Optional[UsingType] = None, index: Optional[str] = None, **kwargs: Any, ) -> Optional[Self]: """ Retrieve a single document from elasticsearch using its ``id``. :arg id: ``id`` of the document to be retrieved :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` Any additional keyword arguments will be passed to ``Elasticsearch.get`` unchanged. 
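For example, with a hypothetical ``Post`` subclass of ``Document``::

            post = Post.get(id="42")
            if post is None:
                ...  # no document with that id exists
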
""" es = cls._get_connection(using) doc = es.get(index=cls._default_index(index), id=id, **kwargs) if not doc.get("found", False): return None return cls.from_es(doc) @classmethod def exists( cls, id: str, using: Optional[UsingType] = None, index: Optional[str] = None, **kwargs: Any, ) -> bool: """ check if exists a single document from elasticsearch using its ``id``. :arg id: ``id`` of the document to check if exists :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` Any additional keyword arguments will be passed to ``Elasticsearch.exists`` unchanged. """ es = cls._get_connection(using) return bool(es.exists(index=cls._default_index(index), id=id, **kwargs)) @classmethod def mget( cls, docs: List[Dict[str, Any]], using: Optional[UsingType] = None, index: Optional[str] = None, raise_on_error: bool = True, missing: str = "none", **kwargs: Any, ) -> List[Optional[Self]]: r""" Retrieve multiple document by their ``id``\s. Returns a list of instances in the same order as requested. :arg docs: list of ``id``\s of the documents to be retrieved or a list of document specifications as per https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg missing: what to do when one of the documents requested is not found. Valid options are ``'none'`` (use ``None``), ``'raise'`` (raise ``NotFoundError``) or ``'skip'`` (ignore the missing document). Any additional keyword arguments will be passed to ``Elasticsearch.mget`` unchanged. """ if missing not in ("raise", "skip", "none"): raise ValueError("'missing' must be 'raise', 'skip', or 'none'.") es = cls._get_connection(using) body = { "docs": [ doc if isinstance(doc, collections.abc.Mapping) else {"_id": doc} for doc in docs ] } results = es.mget(index=cls._default_index(index), body=body, **kwargs) objs: List[Optional[Self]] = [] error_docs: List[Self] = [] missing_docs: List[Self] = [] for doc in results["docs"]: if doc.get("found"): if error_docs or missing_docs: # We're going to raise an exception anyway, so avoid an # expensive call to cls.from_es(). continue objs.append(cls.from_es(doc)) elif doc.get("error"): if raise_on_error: error_docs.append(doc) if missing == "none": objs.append(None) # The doc didn't cause an error, but the doc also wasn't found. elif missing == "raise": missing_docs.append(doc) elif missing == "none": objs.append(None) if error_docs: error_ids = [doc["_id"] for doc in error_docs] message = "Required routing not provided for documents %s." message %= ", ".join(error_ids) raise RequestError(400, message, error_docs) # type: ignore[arg-type] if missing_docs: missing_ids = [doc["_id"] for doc in missing_docs] message = f"Documents {', '.join(missing_ids)} not found." raise NotFoundError(404, message, {"docs": missing_docs}) # type: ignore[arg-type] return objs def delete( self, using: Optional[UsingType] = None, index: Optional[str] = None, **kwargs: Any, ) -> None: """ Delete the instance in elasticsearch. :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` Any additional keyword arguments will be passed to ``Elasticsearch.delete`` unchanged. 
""" es = self._get_connection(using) # extract routing etc from meta doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} # Optimistic concurrency control if "seq_no" in self.meta and "primary_term" in self.meta: doc_meta["if_seq_no"] = self.meta["seq_no"] doc_meta["if_primary_term"] = self.meta["primary_term"] doc_meta.update(kwargs) i = self._get_index(index) assert i is not None es.delete(index=i, **doc_meta) def update( self, using: Optional[UsingType] = None, index: Optional[str] = None, detect_noop: bool = True, doc_as_upsert: bool = False, refresh: bool = False, retry_on_conflict: Optional[int] = None, script: Optional[Union[str, Dict[str, Any]]] = None, script_id: Optional[str] = None, scripted_upsert: bool = False, upsert: Optional[Dict[str, Any]] = None, return_doc_meta: bool = False, **fields: Any, ) -> Any: """ Partial update of the document, specify fields you wish to update and both the instance and the document in elasticsearch will be updated:: doc = MyDocument(title='Document Title!') doc.save() doc.update(title='New Document Title!') :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg detect_noop: Set to ``False`` to disable noop detection. :arg refresh: Control when the changes made by this request are visible to search. Set to ``True`` for immediate effect. :arg retry_on_conflict: In between the get and indexing phases of the update, it is possible that another process might have already updated the same document. By default, the update will fail with a version conflict exception. The retry_on_conflict parameter controls how many times to retry the update before finally throwing an exception. :arg doc_as_upsert: Instead of sending a partial doc plus an upsert doc, setting doc_as_upsert to true will use the contents of doc as the upsert value :arg script: the source code of the script as a string, or a dictionary with script attributes to update. :arg return_doc_meta: set to ``True`` to return all metadata from the index API call instead of only the operation result :return: operation result noop/updated """ body: Dict[str, Any] = { "doc_as_upsert": doc_as_upsert, "detect_noop": detect_noop, } # scripted update if script or script_id: if upsert is not None: body["upsert"] = upsert if script: if isinstance(script, str): script = {"source": script} else: script = {"id": script_id} if "params" not in script: script["params"] = fields else: script["params"].update(fields) body["script"] = script body["scripted_upsert"] = scripted_upsert # partial document update else: if not fields: raise IllegalOperation( "You cannot call update() without updating individual fields or a script. " "If you wish to update the entire object use save()." 
) # update given fields locally merge(self, fields) # prepare data for ES values = self.to_dict(skip_empty=False) # if fields were given: partial update body["doc"] = {k: values.get(k) for k in fields.keys()} # extract routing etc from meta doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} if retry_on_conflict is not None: doc_meta["retry_on_conflict"] = retry_on_conflict # Optimistic concurrency control if ( retry_on_conflict in (None, 0) and "seq_no" in self.meta and "primary_term" in self.meta ): doc_meta["if_seq_no"] = self.meta["seq_no"] doc_meta["if_primary_term"] = self.meta["primary_term"] i = self._get_index(index) assert i is not None meta = self._get_connection(using).update( index=i, body=body, refresh=refresh, **doc_meta ) # update meta information from ES for k in META_FIELDS: if "_" + k in meta: setattr(self.meta, k, meta["_" + k]) return meta if return_doc_meta else meta["result"] def save( self, using: Optional[UsingType] = None, index: Optional[str] = None, validate: bool = True, skip_empty: bool = True, return_doc_meta: bool = False, **kwargs: Any, ) -> Any: """ Save the document into elasticsearch. If the document doesn't exist it is created; otherwise it is overwritten. The returned operation result indicates whether a new document was created. :arg index: elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg using: connection alias to use, defaults to ``'default'`` :arg validate: set to ``False`` to skip validating the document :arg skip_empty: if set to ``False`` will cause empty values (``None``, ``[]``, ``{}``) to be left on the document. Those values will be stripped out otherwise as they make no difference in elasticsearch. :arg return_doc_meta: set to ``True`` to return all metadata from the update API call instead of only the operation result Any additional keyword arguments will be passed to ``Elasticsearch.index`` unchanged. :return: operation result created/updated """ if validate: self.full_clean() es = self._get_connection(using) # extract routing etc from meta doc_meta = {k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} # Optimistic concurrency control if "seq_no" in self.meta and "primary_term" in self.meta: doc_meta["if_seq_no"] = self.meta["seq_no"] doc_meta["if_primary_term"] = self.meta["primary_term"] doc_meta.update(kwargs) i = self._get_index(index) assert i is not None meta = es.index( index=i, body=self.to_dict(skip_empty=skip_empty), **doc_meta, ) # update meta information from ES for k in META_FIELDS: if "_" + k in meta: setattr(self.meta, k, meta["_" + k]) return meta if return_doc_meta else meta["result"] @classmethod def bulk( cls, actions: Iterable[Union[Self, Dict[str, Any]]], using: Optional[UsingType] = None, index: Optional[str] = None, validate: bool = True, skip_empty: bool = True, **kwargs: Any, ) -> Tuple[int, Union[int, List[Any]]]: """ Perform multiple indexing operations in a single request. :arg actions: an iterable (such as a generator) of document instances to be indexed or bulk operation dictionaries. :arg using: connection alias to use, defaults to ``'default'`` :arg index: Elasticsearch index to use, if the ``Document`` is associated with an index this can be omitted. :arg validate: set to ``False`` to skip validating the documents :arg skip_empty: if set to ``False`` will cause empty values (``None``, ``[]``, ``{}``) to be left on the document. Those values will be stripped out otherwise as they make no difference in Elasticsearch.
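A minimal sketch, assuming a hypothetical ``Post`` document class::

    Post.bulk([Post(title="First post"), Post(title="Second post")])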
Any additional keyword arguments will be passed to ``Elasticsearch.bulk`` unchanged. :return: bulk operation results """ es = cls._get_connection(using) i = cls._default_index(index) assert i is not None class Generate: def __init__( self, doc_iterator: Iterable[Union[Document, Dict[str, Any]]], ): self.doc_iterator = doc_iterator.__iter__() def __iter__(self) -> Self: return self def __next__(self) -> Dict[str, Any]: doc: Optional[Union[Document, Dict[str, Any]]] = ( self.doc_iterator.__next__() ) if isinstance(doc, dict): action = doc doc = None if "_source" in action and isinstance(action["_source"], Document): doc = action["_source"] if validate: # pragma: no cover doc.full_clean() action["_source"] = doc.to_dict( include_meta=False, skip_empty=skip_empty ) elif doc is not None: if validate: # pragma: no cover doc.full_clean() action = doc.to_dict(include_meta=True, skip_empty=skip_empty) if "_index" not in action: action["_index"] = i return action return bulk(es, Generate(actions), **kwargs) @classmethod def esql_execute( cls, query: "ESQLBase", return_additional: bool = False, ignore_missing_fields: bool = False, using: Optional[UsingType] = None, **kwargs: Any, ) -> Iterator[Union[Self, Tuple[Self, Dict[str, Any]]]]: """ Execute the given ES|QL query and return an iterator of instances of this ``Document``. When ``return_additional`` is set to ``True``, the iterator returns 2-element tuples instead, where the first element is an instance of this ``Document`` and the second a dictionary with any remaining columns requested in the query. :arg query: an ES|QL query object created with the ``esql_from()`` method. :arg return_additional: if ``False`` (the default), this method returns document objects. If set to ``True``, the method returns tuples with a document in the first element and a dictionary with any additional columns returned by the query in the second element. :arg ignore_missing_fields: if ``False`` (the default), all the fields of the document must be present in the query, or else an exception is raised. Set to ``True`` to allow missing fields, which will result in partially initialized document objects. :arg using: connection alias to use, defaults to ``'default'`` :arg kwargs: additional options for the ``client.esql.query()`` function. """ es = cls._get_connection(using) response = es.esql.query(query=str(query), **kwargs) query_columns = [col["name"] for col in response.body.get("columns", [])] # Here we get the list of columns defined in the document, which are the # columns that we will take from each result to assemble the document # object. # When `for_esql=False` is passed below by default, the list will include # nested fields, which ES|QL does not return, causing an error. When passing # `ignore_missing_fields=True` the list will be generated with # `for_esql=True`, so the error will not occur, but the documents will # not have any Nested objects in them. doc_fields = set(cls._get_field_names(for_esql=ignore_missing_fields)) if not ignore_missing_fields and not doc_fields.issubset(set(query_columns)): raise ValueError( f"Not all fields of {cls.__name__} were returned by the query. " "Make sure your document does not use Nested fields, which are " "currently not supported in ES|QL. To force the query to be " "evaluated in spite of the missing fields, set the " "ignore_missing_fields=True option in the esql_execute() call."
) non_doc_fields: set[str] = set(query_columns) - doc_fields - {"_id"} index_id = query_columns.index("_id") results = response.body.get("values", []) for column_values in results: # create a dictionary with all the document fields, expanding the # dot notation returned by ES|QL into the recursive dictionaries # used by Document.from_dict() doc_dict: Dict[str, Any] = {} for col, val in zip(query_columns, column_values): if col in doc_fields: cols = col.split(".") d = doc_dict for c in cols[:-1]: if c not in d: d[c] = {} d = d[c] d[cols[-1]] = val # create the document instance obj = cls(meta={"_id": column_values[index_id]}) obj._from_dict(doc_dict) if return_additional: # build a dict with any other values included in the response other = { col: val for col, val in zip(query_columns, column_values) if col in non_doc_fields } yield obj, other else: yield obj python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/faceted_search.py000066400000000000000000000032361506101734100256560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import TYPE_CHECKING from ..faceted_search_base import FacetedResponse, FacetedSearchBase from ..utils import _R from .search import Search if TYPE_CHECKING: from ..response import Response class FacetedSearch(FacetedSearchBase[_R]): _s: Search[_R] def count(self) -> int: return self._s.count() def search(self) -> Search[_R]: """ Returns the base Search object to which the facets are added. You can customize the query by overriding this method and returning a modified search object. """ s = Search[_R](doc_type=self.doc_types, index=self.index, using=self.using) return s.response_class(FacetedResponse) def execute(self) -> "Response[_R]": """ Execute the search and return the response. """ r = self._s.execute() r._faceted_search = self return r python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/index.py000066400000000000000000000522031506101734100240430ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from typing import TYPE_CHECKING, Any, Dict, Optional from typing_extensions import Self from ..connections import get_connection from ..exceptions import IllegalOperation from ..index_base import IndexBase from ..utils import UsingType from .mapping import Mapping from .search import Search from .update_by_query import UpdateByQuery if TYPE_CHECKING: from elastic_transport import ObjectApiResponse from elasticsearch import Elasticsearch class IndexTemplate: def __init__( self, name: str, template: str, index: Optional["Index"] = None, order: Optional[int] = None, **kwargs: Any, ): if index is None: self._index = Index(template, **kwargs) else: if kwargs: raise ValueError( "You cannot specify options for Index when" " passing an Index instance." ) self._index = index.clone() self._index._name = template self._template_name = name self.order = order def __getattr__(self, attr_name: str) -> Any: return getattr(self._index, attr_name) def to_dict(self) -> Dict[str, Any]: d = self._index.to_dict() d["index_patterns"] = [self._index._name] if self.order is not None: d["order"] = self.order return d def save(self, using: Optional[UsingType] = None) -> "ObjectApiResponse[Any]": es = get_connection(using or self._index._using) return es.indices.put_template(name=self._template_name, body=self.to_dict()) class ComposableIndexTemplate: def __init__( self, name: str, template: str, index: Optional["Index"] = None, priority: Optional[int] = None, **kwargs: Any, ): if index is None: self._index = Index(template, **kwargs) else: if kwargs: raise ValueError( "You cannot specify options for Index when" " passing an Index instance." ) self._index = index.clone() self._index._name = template self._template_name = name self.priority = priority def __getattr__(self, attr_name: str) -> Any: return getattr(self._index, attr_name) def to_dict(self) -> Dict[str, Any]: d: Dict[str, Any] = {"template": self._index.to_dict()} d["index_patterns"] = [self._index._name] if self.priority is not None: d["priority"] = self.priority return d def save(self, using: Optional[UsingType] = None) -> "ObjectApiResponse[Any]": es = get_connection(using or self._index._using) return es.indices.put_index_template(name=self._template_name, **self.to_dict()) class Index(IndexBase): _using: UsingType if TYPE_CHECKING: def get_or_create_mapping(self) -> Mapping: ... 
def __init__(self, name: str, using: UsingType = "default"): """ :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ super().__init__(name, Mapping, using=using) def _get_connection(self, using: Optional[UsingType] = None) -> "Elasticsearch": if self._name is None: raise ValueError("You cannot perform API calls on the default index.") return get_connection(using or self._using) connection = property(_get_connection) def as_template( self, template_name: str, pattern: Optional[str] = None, order: Optional[int] = None, ) -> IndexTemplate: return IndexTemplate( template_name, pattern or self._name, index=self, order=order ) def as_composable_template( self, template_name: str, pattern: Optional[str] = None, priority: Optional[int] = None, ) -> ComposableIndexTemplate: return ComposableIndexTemplate( template_name, pattern or self._name, index=self, priority=priority ) def load_mappings(self, using: Optional[UsingType] = None) -> None: self.get_or_create_mapping().update_from_es( self._name, using=using or self._using ) def clone( self, name: Optional[str] = None, using: Optional[UsingType] = None ) -> Self: """ Create a copy of the instance with another name or connection alias. Useful for creating multiple indices with shared configuration:: i = Index('base-index') i.settings(number_of_shards=1) i.create() i2 = i.clone('other-index') i2.create() :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ i = self.__class__(name or self._name, using=using or self._using) i._settings = self._settings.copy() i._aliases = self._aliases.copy() i._analysis = self._analysis.copy() i._doc_types = self._doc_types[:] if self._mapping is not None: i._mapping = self._mapping._clone() return i def search(self, using: Optional[UsingType] = None) -> Search: """ Return a :class:`~elasticsearch.dsl.Search` object searching over the index (or all the indices belonging to this template) and its ``Document``\\s. """ return Search( using=using or self._using, index=self._name, doc_type=self._doc_types ) def updateByQuery(self, using: Optional[UsingType] = None) -> UpdateByQuery: """ Return a :class:`~elasticsearch.dsl.UpdateByQuery` object searching over the index (or all the indices belonging to this template) and updating Documents that match the search criteria. For more information, see here: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html """ return UpdateByQuery( using=using or self._using, index=self._name, ) def create( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Creates the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.create`` unchanged. """ return self._get_connection(using).indices.create( index=self._name, body=self.to_dict(), **kwargs ) def is_closed(self, using: Optional[UsingType] = None) -> bool: state = self._get_connection(using).cluster.state( index=self._name, metric="metadata" ) return bool(state["metadata"]["indices"][self._name]["state"] == "close") def save( self, using: Optional[UsingType] = None ) -> "Optional[ObjectApiResponse[Any]]": """ Sync the index definition with elasticsearch, creating the index if it doesn't exist and updating its settings and mappings if it does. Note some settings and mapping changes cannot be done on an open index (or at all on an existing index) and for those this method will fail with the underlying exception. 
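A minimal sketch, with an illustrative index name::

    i = Index("blog-posts")
    i.settings(number_of_shards=1)
    i.save()  # creates the index, or syncs settings/mappings if it exists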
""" if not self.exists(using=using): return self.create(using=using) body = self.to_dict() settings = body.pop("settings", {}) analysis = settings.pop("analysis", None) current_settings = (self.get_settings(using=using))[self._name]["settings"][ "index" ] if analysis: if self.is_closed(using=using): # closed index, update away settings["analysis"] = analysis else: # compare analysis definition, if all analysis objects are # already defined as requested, skip analysis update and # proceed, otherwise raise IllegalOperation existing_analysis = current_settings.get("analysis", {}) if any( existing_analysis.get(section, {}).get(k, None) != analysis[section][k] for section in analysis for k in analysis[section] ): raise IllegalOperation( "You cannot update analysis configuration on an open index, " "you need to close index %s first." % self._name ) # try and update the settings if settings: settings = settings.copy() for k, v in list(settings.items()): if k in current_settings and current_settings[k] == str(v): del settings[k] if settings: self.put_settings(using=using, body=settings) # update the mappings, any conflict in the mappings will result in an # exception mappings = body.pop("mappings", {}) if mappings: return self.put_mapping(using=using, body=mappings) return None def analyze( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Perform the analysis process on a text and return the tokens breakdown of the text. Any additional keyword arguments will be passed to ``Elasticsearch.indices.analyze`` unchanged. """ return self._get_connection(using).indices.analyze(index=self._name, **kwargs) def refresh( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Performs a refresh operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.refresh`` unchanged. """ return self._get_connection(using).indices.refresh(index=self._name, **kwargs) def flush( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Performs a flush operation on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.flush`` unchanged. """ return self._get_connection(using).indices.flush(index=self._name, **kwargs) def get( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The get index API allows to retrieve information about the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get`` unchanged. """ return self._get_connection(using).indices.get(index=self._name, **kwargs) def open( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Opens the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.open`` unchanged. """ return self._get_connection(using).indices.open(index=self._name, **kwargs) def close( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Closes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.close`` unchanged. """ return self._get_connection(using).indices.close(index=self._name, **kwargs) def delete( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Deletes the index in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete`` unchanged. 
""" return self._get_connection(using).indices.delete(index=self._name, **kwargs) def exists(self, using: Optional[UsingType] = None, **kwargs: Any) -> bool: """ Returns ``True`` if the index already exists in elasticsearch. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists`` unchanged. """ return bool( self._get_connection(using).indices.exists(index=self._name, **kwargs) ) def put_mapping( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Register specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_mapping`` unchanged. """ return self._get_connection(using).indices.put_mapping( index=self._name, **kwargs ) def get_mapping( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve specific mapping definition for a specific type. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_mapping`` unchanged. """ return self._get_connection(using).indices.get_mapping( index=self._name, **kwargs ) def get_field_mapping( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve mapping definition of a specific field. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_field_mapping`` unchanged. """ return self._get_connection(using).indices.get_field_mapping( index=self._name, **kwargs ) def put_alias( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Create an alias for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_alias`` unchanged. """ return self._get_connection(using).indices.put_alias(index=self._name, **kwargs) def exists_alias(self, using: Optional[UsingType] = None, **kwargs: Any) -> bool: """ Return a boolean indicating whether given alias exists for this index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.exists_alias`` unchanged. """ return bool( self._get_connection(using).indices.exists_alias(index=self._name, **kwargs) ) def get_alias( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve a specified alias. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_alias`` unchanged. """ return self._get_connection(using).indices.get_alias(index=self._name, **kwargs) def delete_alias( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Delete specific alias. Any additional keyword arguments will be passed to ``Elasticsearch.indices.delete_alias`` unchanged. """ return self._get_connection(using).indices.delete_alias( index=self._name, **kwargs ) def get_settings( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve settings for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.get_settings`` unchanged. """ return self._get_connection(using).indices.get_settings( index=self._name, **kwargs ) def put_settings( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Change specific index level settings in real time. Any additional keyword arguments will be passed to ``Elasticsearch.indices.put_settings`` unchanged. 
""" return self._get_connection(using).indices.put_settings( index=self._name, **kwargs ) def stats( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Retrieve statistics on different operations happening on the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.stats`` unchanged. """ return self._get_connection(using).indices.stats(index=self._name, **kwargs) def segments( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Provide low level segments information that a Lucene index (shard level) is built with. Any additional keyword arguments will be passed to ``Elasticsearch.indices.segments`` unchanged. """ return self._get_connection(using).indices.segments(index=self._name, **kwargs) def validate_query( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Validate a potentially expensive query without executing it. Any additional keyword arguments will be passed to ``Elasticsearch.indices.validate_query`` unchanged. """ return self._get_connection(using).indices.validate_query( index=self._name, **kwargs ) def clear_cache( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Clear all caches or specific cached associated with the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.clear_cache`` unchanged. """ return self._get_connection(using).indices.clear_cache( index=self._name, **kwargs ) def recovery( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The indices recovery API provides insight into on-going shard recoveries for the index. Any additional keyword arguments will be passed to ``Elasticsearch.indices.recovery`` unchanged. """ return self._get_connection(using).indices.recovery(index=self._name, **kwargs) def shard_stores( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ Provides store information for shard copies of the index. Store information reports on which nodes shard copies exist, the shard copy version, indicating how recent they are, and any exceptions encountered while opening the shard index or from earlier engine failure. Any additional keyword arguments will be passed to ``Elasticsearch.indices.shard_stores`` unchanged. """ return self._get_connection(using).indices.shard_stores( index=self._name, **kwargs ) def forcemerge( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The force merge API allows to force merging of the index through an API. The merge relates to the number of segments a Lucene index holds within each shard. The force merge operation allows to reduce the number of segments by merging them. This call will block until the merge is complete. If the http connection is lost, the request will continue in the background, and any new requests will block until the previous force merge is complete. Any additional keyword arguments will be passed to ``Elasticsearch.indices.forcemerge`` unchanged. """ return self._get_connection(using).indices.forcemerge( index=self._name, **kwargs ) def shrink( self, using: Optional[UsingType] = None, **kwargs: Any ) -> "ObjectApiResponse[Any]": """ The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The number of primary shards in the target index must be a factor of the shards in the source index. 
For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. Any additional keyword arguments will be passed to ``Elasticsearch.indices.shrink`` unchanged. """ return self._get_connection(using).indices.shrink(index=self._name, **kwargs) python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/mapping.py000066400000000000000000000032221506101734100243640ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import List, Optional, Union from typing_extensions import Self from ..connections import get_connection from ..mapping_base import MappingBase from ..utils import UsingType class Mapping(MappingBase): @classmethod def from_es( cls, index: Optional[Union[str, List[str]]], using: UsingType = "default" ) -> Self: m = cls() m.update_from_es(index, using) return m def update_from_es( self, index: Optional[Union[str, List[str]]], using: UsingType = "default" ) -> None: es = get_connection(using) raw = es.indices.get_mapping(index=index) _, raw = raw.popitem() self._update_from_dict(raw["mappings"]) def save(self, index: str, using: UsingType = "default") -> None: from .index import Index i = Index(index, using=using) i.mapping(self) i.save() python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/search.py000066400000000000000000000173111506101734100242020ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import contextlib from typing import ( TYPE_CHECKING, Any, Dict, Iterator, List, Optional, cast, ) from typing_extensions import Self from elasticsearch.exceptions import ApiError from elasticsearch.helpers import scan from ..connections import get_connection from ..response import Response from ..search_base import MultiSearchBase, SearchBase from ..utils import _R, AttrDict, UsingType class Search(SearchBase[_R]): _using: UsingType def __iter__(self) -> Iterator[_R]: """ Iterate over the hits. """ class ResultsIterator(Iterator[_R]): def __init__(self, search: Search[_R]): self.search = search self.iterator: Optional[Iterator[_R]] = None def __next__(self) -> _R: if self.iterator is None: self.iterator = iter(self.search.execute()) try: return next(self.iterator) except StopIteration: raise StopIteration() return ResultsIterator(self) def count(self) -> int: """ Return the number of hits matching the query and filters. Note that only the actual number is returned. """ if hasattr(self, "_response") and self._response.hits.total.relation == "eq": # type: ignore[attr-defined] return cast(int, self._response.hits.total.value) # type: ignore[attr-defined] es = get_connection(self._using) d = self.to_dict(count=True) # TODO: failed shards detection resp = es.count( index=self._index, query=cast(Optional[Dict[str, Any]], d.get("query", None)), **self._params, ) return cast(int, resp["count"]) def execute(self, ignore_cache: bool = False) -> Response[_R]: """ Execute the search and return an instance of ``Response`` wrapping all the data. :arg ignore_cache: if set to ``True``, consecutive calls will hit ES, ignoring any cached result. Defaults to ``False``. """ if ignore_cache or not hasattr(self, "_response"): es = get_connection(self._using) self._response = self._response_class( self, ( es.search(index=self._index, body=self.to_dict(), **self._params) ).body, ) return self._response def scan(self) -> Iterator[_R]: """ Turn the search into a scan search and return a generator that will iterate over all the documents matching the query. Use the ``params`` method to specify any additional arguments you wish to pass to the underlying ``scan`` helper from ``elasticsearch-py`` - https://elasticsearch-py.readthedocs.io/en/latest/helpers.html#scan The ``iterate()`` method should be preferred, as it provides similar functionality using an Elasticsearch point in time. """ es = get_connection(self._using) for hit in scan(es, query=self.to_dict(), index=self._index, **self._params): yield self._get_result(cast(AttrDict[Any], hit)) def delete(self) -> AttrDict[Any]: """ ``delete()`` executes the query by delegating to ``delete_by_query()``. Use the ``params`` method to specify any additional arguments you wish to pass to the underlying ``delete_by_query`` helper from ``elasticsearch-py`` - https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.delete_by_query """ es = get_connection(self._using) assert self._index is not None return AttrDict( cast( Dict[str, Any], es.delete_by_query( index=self._index, body=self.to_dict(), **self._params ), ) ) @contextlib.contextmanager def point_in_time(self, keep_alive: str = "1m") -> Iterator[Self]: """ Open a point in time (pit) that can be used across several searches. This method implements a context manager that returns a search object configured to operate within the created pit.
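A minimal sketch, with an illustrative index name::

    s = Search(index="blog-posts")
    with s.point_in_time(keep_alive="2m") as pit_s:
        for hit in pit_s.execute():
            ...  # process each hit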
:arg keep_alive: the time to live for the point in time, renewed with each search request """ es = get_connection(self._using) pit = es.open_point_in_time(index=self._index or "*", keep_alive=keep_alive) search = self.index().extra(pit={"id": pit["id"], "keep_alive": keep_alive}) if not search._sort: search = search.sort("_shard_doc") yield search es.close_point_in_time(id=pit["id"]) def iterate(self, keep_alive: str = "1m") -> Iterator[_R]: """ Return a generator that iterates over all the documents matching the query. This method uses a point in time to provide consistent results even when the index is changing. It should be preferred over ``scan()``. :arg keep_alive: the time to live for the point in time, renewed with each new search request """ with self.point_in_time(keep_alive=keep_alive) as s: while True: r = s.execute() for hit in r: yield hit if len(r.hits) == 0: break s = s.search_after() class MultiSearch(MultiSearchBase[_R]): """ Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single request. """ _using: UsingType if TYPE_CHECKING: def add(self, search: Search[_R]) -> Self: ... # type: ignore[override] def execute( self, ignore_cache: bool = False, raise_on_error: bool = True ) -> List[Response[_R]]: """ Execute the multi search request and return a list of search results. """ if ignore_cache or not hasattr(self, "_response"): es = get_connection(self._using) responses = es.msearch( index=self._index, body=self.to_dict(), **self._params ) out: List[Response[_R]] = [] for s, r in zip(self._searches, responses["responses"]): if r.get("error", False): if raise_on_error: raise ApiError("N/A", meta=responses.meta, body=r) r = None else: r = Response(s, r) out.append(r) self._response = out return self._response class EmptySearch(Search[_R]): def count(self) -> int: return 0 def execute(self, ignore_cache: bool = False) -> Response[_R]: return self._response_class(self, {"hits": {"total": 0, "hits": []}}) def scan(self) -> Iterator[_R]: return yield # a bit strange, but this forces an empty generator function def delete(self) -> AttrDict[Any]: return AttrDict[Any]({}) python-elasticsearch-9.1.1/elasticsearch/dsl/_sync/update_by_query.py000066400000000000000000000030421506101734100261320ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import TYPE_CHECKING from ..connections import get_connection from ..update_by_query_base import UpdateByQueryBase from ..utils import _R, UsingType if TYPE_CHECKING: from ..response import UpdateByQueryResponse class UpdateByQuery(UpdateByQueryBase[_R]): _using: UsingType def execute(self) -> "UpdateByQueryResponse[_R]": """ Execute the search and return an instance of ``Response`` wrapping all the data. 
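A minimal sketch, with illustrative index, field and script contents::

    ubq = (
        UpdateByQuery(index="blog-posts")
        .query("match", published=True)
        .script(source="ctx._source.views += 1")
    )
    response = ubq.execute()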
""" es = get_connection(self._using) assert self._index is not None self._response = self._response_class( self, ( es.update_by_query(index=self._index, **self.to_dict(), **self._params) ).body, ) return self._response python-elasticsearch-9.1.1/elasticsearch/dsl/aggs.py000066400000000000000000004027071506101734100225520ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import collections.abc from copy import deepcopy from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, Generic, Iterable, Literal, Mapping, MutableMapping, Optional, Sequence, Union, cast, ) from elastic_transport.client_utils import DEFAULT from . import wrappers from .query import Query from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData from .utils import _R, AttrDict, DslBase if TYPE_CHECKING: from elastic_transport.client_utils import DefaultType from . import types from .document_base import InstrumentedField from .search_base import SearchBase def A( name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str], filter: Optional[Union[str, "Query"]] = None, **params: Any, ) -> "Agg[_R]": if filter is not None: if name_or_agg != "filter": raise ValueError( "Aggregation %r doesn't accept positional argument 'filter'." % name_or_agg ) params["filter"] = filter # {"terms": {"field": "tags"}, "aggs": {...}} if isinstance(name_or_agg, collections.abc.MutableMapping): if params: raise ValueError("A() cannot accept parameters when passing in a dict.") # copy to avoid modifying in-place agg = deepcopy(name_or_agg) # pop out nested aggs aggs = agg.pop("aggs", None) # pop out meta data meta = agg.pop("meta", None) # should be {"terms": {"field": "tags"}} if len(agg) != 1: raise ValueError( 'A() can only accept dict with an aggregation ({"terms": {...}}). ' "Instead it got (%r)" % name_or_agg ) agg_type, params = agg.popitem() if aggs: params = params.copy() params["aggs"] = aggs if meta: params = params.copy() params["meta"] = meta return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params) # Terms(...) just return the nested agg elif isinstance(name_or_agg, Agg): if params: raise ValueError( "A() cannot accept parameters when passing in an Agg object." 
) return name_or_agg # "terms", field="tags" return Agg[_R].get_dsl_class(name_or_agg)(**params) class Agg(DslBase, Generic[_R]): _type_name = "agg" _type_shortcut = staticmethod(A) name = "" def __contains__(self, key: str) -> bool: return False def to_dict(self) -> Dict[str, Any]: d = super().to_dict() if isinstance(d[self.name], dict): n = cast(Dict[str, Any], d[self.name]) if "meta" in n: d["meta"] = n.pop("meta") return d def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]: return AggResponse[_R](self, search, data) class AggBase(Generic[_R]): aggs: Dict[str, Agg[_R]] _base: Agg[_R] _params: Dict[str, Any] _param_defs: ClassVar[Dict[str, Any]] = { "aggs": {"type": "agg", "hash": True}, } def __contains__(self, key: str) -> bool: return key in self._params.get("aggs", {}) def __getitem__(self, agg_name: str) -> Agg[_R]: agg = cast( Agg[_R], self._params.setdefault("aggs", {})[agg_name] ) # propagate KeyError # make sure we're not mutating a shared state - whenever accessing a # bucket, return a shallow copy of it to be safe if isinstance(agg, Bucket): agg = A(agg.name, **agg._params) # be sure to store the copy so any modifications to it will affect us self._params["aggs"][agg_name] = agg return agg def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None: self.aggs[agg_name] = A(agg) def __iter__(self) -> Iterable[str]: return iter(self.aggs) def _agg( self, bucket: bool, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> Agg[_R]: agg = self[name] = A(agg_type, *args, **params) # For chaining - when creating new buckets return them... if bucket: return agg # otherwise return self._base so we can keep chaining else: return self._base def metric( self, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> Agg[_R]: return self._agg(False, name, agg_type, *args, **params) def bucket( self, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> "Bucket[_R]": return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params)) def pipeline( self, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> "Pipeline[_R]": return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params)) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return BucketData(self, search, data) # type: ignore[arg-type] class Bucket(AggBase[_R], Agg[_R]): def __init__(self, **params: Any): super().__init__(**params) # remember self for chaining self._base = self def to_dict(self) -> Dict[str, Any]: d = super(AggBase, self).to_dict() if isinstance(d[self.name], dict): n = cast(AttrDict[Any], d[self.name]) if "aggs" in n: d["aggs"] = n.pop("aggs") return d class Pipeline(Agg[_R]): pass class AdjacencyMatrix(Bucket[_R]): """ A bucket aggregation returning a form of adjacency matrix. The request provides a collection of named filter expressions, similar to the `filters` aggregation. Each bucket in the response represents a non- empty cell in the matrix of intersecting filters. :arg filters: Filters used to create buckets. At least one filter is required. :arg separator: Separator used to concatenate filter names. Defaults to &. 
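A minimal sketch using the ``A`` shortcut (``Q`` is the query shortcut from ``elasticsearch.dsl``; the filter names and fields are illustrative)::

    agg = A("adjacency_matrix", filters={
        "errors": Q("term", status="error"),
        "timeouts": Q("term", status="timeout"),
    })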
""" name = "adjacency_matrix" _param_defs = { "filters": {"type": "query", "hash": True}, } def __init__( self, *, filters: Union[Mapping[str, Query], "DefaultType"] = DEFAULT, separator: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(filters=filters, separator=separator, **kwargs) class AutoDateHistogram(Bucket[_R]): """ A multi-bucket aggregation similar to the date histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. :arg buckets: The target number of buckets. Defaults to `10` if omitted. :arg field: The field on which to run the aggregation. :arg format: The date format used to format `key_as_string` in the response. If no `format` is specified, the first date format specified in the field mapping is used. :arg minimum_interval: The minimum rounding interval. This can make the collection process more efficient, as the aggregation will not attempt to round at any interval lower than `minimum_interval`. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg offset: Time zone specified as a ISO 8601 UTC offset. :arg params: :arg script: :arg time_zone: Time zone ID. """ name = "auto_date_histogram" def __init__( self, *, buckets: Union[int, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, minimum_interval: Union[ Literal["second", "minute", "hour", "day", "month", "year"], "DefaultType" ] = DEFAULT, missing: Any = DEFAULT, offset: Union[str, "DefaultType"] = DEFAULT, params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_zone: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( buckets=buckets, field=field, format=format, minimum_interval=minimum_interval, missing=missing, offset=offset, params=params, script=script, time_zone=time_zone, **kwargs, ) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return FieldBucketData(self, search, data) class Avg(Agg[_R]): """ A single-value metrics aggregation that computes the average of numeric values that are extracted from the aggregated documents. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "avg" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, field=field, missing=missing, script=script, **kwargs ) class AvgBucket(Pipeline[_R]): """ A sibling pipeline aggregation which calculates the mean value of a specified metric in a sibling aggregation. The specified metric must be numeric and the sibling aggregation must be a multi-bucket aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. 
""" name = "avg_bucket" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs ) class Boxplot(Agg[_R]): """ A metrics aggregation that computes a box plot of numeric values extracted from the aggregated documents. :arg compression: Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. :arg execution_hint: The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. Defaults to `default` if omitted. :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "boxplot" def __init__( self, *, compression: Union[float, "DefaultType"] = DEFAULT, execution_hint: Union[ Literal["default", "high_accuracy"], "DefaultType" ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( compression=compression, execution_hint=execution_hint, field=field, missing=missing, script=script, **kwargs, ) class BucketScript(Pipeline[_R]): """ A parent pipeline aggregation which runs a script which can perform per bucket computations on metrics in the parent multi-bucket aggregation. :arg script: The script to run for this aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "bucket_script" def __init__( self, *, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( script=script, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class BucketSelector(Pipeline[_R]): """ A parent pipeline aggregation which runs a script to determine whether the current bucket will be retained in the parent multi-bucket aggregation. :arg script: The script to run for this aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. 
""" name = "bucket_selector" def __init__( self, *, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( script=script, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class BucketSort(Bucket[_R]): """ A parent pipeline aggregation which sorts the buckets of its parent multi-bucket aggregation. :arg from: Buckets in positions prior to `from` will be truncated. :arg gap_policy: The policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg size: The number of buckets to return. Defaults to all buckets of the parent aggregation. :arg sort: The list of fields to sort on. """ name = "bucket_sort" def __init__( self, *, from_: Union[int, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, sort: Union[ Union[Union[str, "InstrumentedField"], "types.SortOptions"], Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]], Dict[str, Any], "DefaultType", ] = DEFAULT, **kwargs: Any, ): super().__init__( from_=from_, gap_policy=gap_policy, size=size, sort=sort, **kwargs ) class BucketCountKsTest(Pipeline[_R]): """ A sibling pipeline aggregation which runs a two sample Kolmogorov–Smirnov test ("K-S test") against a provided distribution and the distribution implied by the documents counts in the configured sibling aggregation. :arg alternative: A list of string values indicating which K-S test alternative to calculate. The valid values are: "greater", "less", "two_sided". This parameter is key for determining the K-S statistic used when calculating the K-S test. Default value is all possible alternative hypotheses. :arg fractions: A list of doubles indicating the distribution of the samples with which to compare to the `buckets_path` results. In typical usage this is the overall proportion of documents in each bucket, which is compared with the actual document proportions in each bucket from the sibling aggregation counts. The default is to assume that overall documents are uniformly distributed on these buckets, which they would be if one used equal percentiles of a metric to define the bucket end points. :arg sampling_method: Indicates the sampling methodology when calculating the K-S test. Note, this is sampling of the returned values. This determines the cumulative distribution function (CDF) points used comparing the two samples. Default is `upper_tail`, which emphasizes the upper end of the CDF points. Valid options are: `upper_tail`, `uniform`, and `lower_tail`. :arg buckets_path: Path to the buckets that contain one set of values to correlate. 
""" name = "bucket_count_ks_test" def __init__( self, *, alternative: Union[Sequence[str], "DefaultType"] = DEFAULT, fractions: Union[Sequence[float], "DefaultType"] = DEFAULT, sampling_method: Union[str, "DefaultType"] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( alternative=alternative, fractions=fractions, sampling_method=sampling_method, buckets_path=buckets_path, **kwargs, ) class BucketCorrelation(Pipeline[_R]): """ A sibling pipeline aggregation which runs a correlation function on the configured sibling multi-bucket aggregation. :arg function: (required) The correlation function to execute. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "bucket_correlation" def __init__( self, *, function: Union[ "types.BucketCorrelationFunction", Dict[str, Any], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__(function=function, buckets_path=buckets_path, **kwargs) class Cardinality(Agg[_R]): """ A single-value metrics aggregation that calculates an approximate count of distinct values. :arg precision_threshold: A unique count below which counts are expected to be close to accurate. This allows to trade memory for accuracy. Defaults to `3000` if omitted. :arg rehash: :arg execution_hint: Mechanism by which cardinality aggregations is run. :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "cardinality" def __init__( self, *, precision_threshold: Union[int, "DefaultType"] = DEFAULT, rehash: Union[bool, "DefaultType"] = DEFAULT, execution_hint: Union[ Literal[ "global_ordinals", "segment_ordinals", "direct", "save_memory_heuristic", "save_time_heuristic", ], "DefaultType", ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( precision_threshold=precision_threshold, rehash=rehash, execution_hint=execution_hint, field=field, missing=missing, script=script, **kwargs, ) class CategorizeText(Bucket[_R]): """ A multi-bucket aggregation that groups semi-structured text into buckets. :arg field: (required) The semi-structured text field to categorize. :arg max_unique_tokens: The maximum number of unique tokens at any position up to max_matched_tokens. Must be larger than 1. Smaller values use less memory and create fewer categories. Larger values will use more memory and create narrower categories. Max allowed value is 100. Defaults to `50` if omitted. :arg max_matched_tokens: The maximum number of token positions to match on before attempting to merge categories. Larger values will use more memory and create narrower categories. Max allowed value is 100. Defaults to `5` if omitted. :arg similarity_threshold: The minimum percentage of tokens that must match for text to be added to the category bucket. Must be between 1 and 100. The larger the value the narrower the categories. Larger values will increase memory usage and create narrower categories. Defaults to `50` if omitted. :arg categorization_filters: This property expects an array of regular expressions. 
The expressions are used to filter out matching sequences from the categorization field values. You can use this functionality to fine tune the categorization by excluding sequences from consideration when categories are defined. For example, you can exclude SQL statements that appear in your log files. This property cannot be used at the same time as categorization_analyzer. If you only want to define simple regular expression filters that are applied prior to tokenization, setting this property is the easiest method. If you also want to customize the tokenizer or post-tokenization filtering, use the categorization_analyzer property instead and include the filters as pattern_replace character filters. :arg categorization_analyzer: The categorization analyzer specifies how the text is analyzed and tokenized before being categorized. The syntax is very similar to that used to define the analyzer in the analyze API. This property cannot be used at the same time as `categorization_filters`. :arg shard_size: The number of categorization buckets to return from each shard before merging all the results. :arg size: The number of buckets to return. Defaults to `10` if omitted. :arg min_doc_count: The minimum number of documents in a bucket to be returned to the results. :arg shard_min_doc_count: The minimum number of documents in a bucket to be returned from the shard before merging. """ name = "categorize_text" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, max_unique_tokens: Union[int, "DefaultType"] = DEFAULT, max_matched_tokens: Union[int, "DefaultType"] = DEFAULT, similarity_threshold: Union[int, "DefaultType"] = DEFAULT, categorization_filters: Union[Sequence[str], "DefaultType"] = DEFAULT, categorization_analyzer: Union[ str, "types.CustomCategorizeTextAnalyzer", Dict[str, Any], "DefaultType" ] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, max_unique_tokens=max_unique_tokens, max_matched_tokens=max_matched_tokens, similarity_threshold=similarity_threshold, categorization_filters=categorization_filters, categorization_analyzer=categorization_analyzer, shard_size=shard_size, size=size, min_doc_count=min_doc_count, shard_min_doc_count=shard_min_doc_count, **kwargs, ) class Children(Bucket[_R]): """ A single bucket aggregation that selects child documents that have the specified type, as defined in a `join` field. :arg type: The child type that should be selected. """ name = "children" def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any): super().__init__(type=type, **kwargs) class Composite(Bucket[_R]): """ A multi-bucket aggregation that creates composite buckets from different sources. Unlike the other multi-bucket aggregations, you can use the `composite` aggregation to paginate *all* buckets from a multi-level aggregation efficiently. :arg after: When paginating, use the `after_key` value returned in the previous response to retrieve the next page. :arg size: The number of composite buckets that should be returned. Defaults to `10` if omitted. :arg sources: The value sources used to build composite buckets. Keys are returned in the order of the `sources` definition. 
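
    Example (an illustrative sketch; the index and field names are
    assumptions made for this example)::

        from elasticsearch.dsl import A, Search

        s = Search(index="products")
        s.aggs.bucket(
            "my_buckets",
            "composite",
            sources=[{"category": A("terms", field="category")}],
            size=100,
        )
        # For the next page, pass the previous response's
        # `aggregations.my_buckets.after_key` back in as `after`.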
""" name = "composite" def __init__( self, *, after: Union[ Mapping[ Union[str, "InstrumentedField"], Union[int, float, str, bool, None] ], "DefaultType", ] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, sources: Union[Sequence[Mapping[str, Agg[_R]]], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(after=after, size=size, sources=sources, **kwargs) class CumulativeCardinality(Pipeline[_R]): """ A parent pipeline aggregation which calculates the cumulative cardinality in a parent `histogram` or `date_histogram` aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "cumulative_cardinality" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs ) class CumulativeSum(Pipeline[_R]): """ A parent pipeline aggregation which calculates the cumulative sum of a specified metric in a parent `histogram` or `date_histogram` aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "cumulative_sum" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs ) class DateHistogram(Bucket[_R]): """ A multi-bucket values source based aggregation that can be applied on date values or date range values extracted from the documents. It dynamically builds fixed size (interval) buckets over the values. :arg calendar_interval: Calendar-aware interval. Can be specified using the unit name, such as `month`, or as a single unit quantity, such as `1M`. :arg extended_bounds: Enables extending the bounds of the histogram beyond the data itself. :arg hard_bounds: Limits the histogram to specified bounds. :arg field: The date field whose values are use to build a histogram. :arg fixed_interval: Fixed intervals: a fixed number of SI units and never deviate, regardless of where they fall on the calendar. :arg format: The date format used to format `key_as_string` in the response. If no `format` is specified, the first date format specified in the field mapping is used. :arg interval: :arg min_doc_count: Only returns buckets that have `min_doc_count` number of documents. By default, all buckets between the first bucket that matches documents and the last one are returned. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg offset: Changes the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration. 
    :arg order: The sort order of the returned buckets.
    :arg params:
    :arg script:
    :arg time_zone: Time zone used for bucketing and rounding. Defaults
        to Coordinated Universal Time (UTC).
    :arg keyed: Set to `true` to associate a unique string key with each
        bucket and return the ranges as a hash rather than an array.
    """

    name = "date_histogram"

    def __init__(
        self,
        *,
        calendar_interval: Union[
            Literal[
                "second", "minute", "hour", "day", "week", "month", "quarter", "year"
            ],
            "DefaultType",
        ] = DEFAULT,
        extended_bounds: Union[
            "types.ExtendedBounds", Dict[str, Any], "DefaultType"
        ] = DEFAULT,
        hard_bounds: Union[
            "types.ExtendedBounds", Dict[str, Any], "DefaultType"
        ] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        fixed_interval: Any = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        interval: Any = DEFAULT,
        min_doc_count: Union[int, "DefaultType"] = DEFAULT,
        missing: Any = DEFAULT,
        offset: Any = DEFAULT,
        order: Union[
            Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]],
            Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]],
            "DefaultType",
        ] = DEFAULT,
        params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT,
        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
        time_zone: Union[str, "DefaultType"] = DEFAULT,
        keyed: Union[bool, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            calendar_interval=calendar_interval,
            extended_bounds=extended_bounds,
            hard_bounds=hard_bounds,
            field=field,
            fixed_interval=fixed_interval,
            format=format,
            interval=interval,
            min_doc_count=min_doc_count,
            missing=missing,
            offset=offset,
            order=order,
            params=params,
            script=script,
            time_zone=time_zone,
            keyed=keyed,
            **kwargs,
        )

    def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]:
        return FieldBucketData(self, search, data)


class DateRange(Bucket[_R]):
    """
    A multi-bucket value source based aggregation that enables the user
    to define a set of date ranges - each representing a bucket.

    :arg field: The date field whose values are used to build ranges.
    :arg format: The date format used to format `from` and `to` in the
        response.
    :arg missing: The value to apply to documents that do not have a
        value. By default, documents without a value are ignored.
    :arg ranges: Array of date ranges.
    :arg time_zone: Time zone used to convert dates from another time
        zone to UTC.
    :arg keyed: Set to `true` to associate a unique string key with each
        bucket and return the ranges as a hash rather than an array.
    """

    name = "date_range"

    def __init__(
        self,
        *,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
        ranges: Union[
            Sequence["wrappers.AggregationRange"],
            Sequence[Dict[str, Any]],
            "DefaultType",
        ] = DEFAULT,
        time_zone: Union[str, "DefaultType"] = DEFAULT,
        keyed: Union[bool, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            field=field,
            format=format,
            missing=missing,
            ranges=ranges,
            time_zone=time_zone,
            keyed=keyed,
            **kwargs,
        )


class Derivative(Pipeline[_R]):
    """
    A parent pipeline aggregation which calculates the derivative of a
    specified metric in a parent `histogram` or `date_histogram`
    aggregation.

    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
    """

    name = "derivative"

    def __init__(
        self,
        *,
        format: Union[str, "DefaultType"] = DEFAULT,
        gap_policy: Union[
            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
        ] = DEFAULT,
        buckets_path: Union[
            str, Sequence[str], Mapping[str, str], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
        )


class DiversifiedSampler(Bucket[_R]):
    """
    A filtering aggregation used to limit any sub aggregations'
    processing to a sample of the top-scoring documents. Similar to the
    `sampler` aggregation, but adds the ability to limit the number of
    matches that share a common value.

    :arg execution_hint: The type of value used for de-duplication.
        Defaults to `global_ordinals` if omitted.
    :arg max_docs_per_value: Limits how many documents are permitted per
        choice of de-duplicating value. Defaults to `1` if omitted.
    :arg script:
    :arg shard_size: Limits how many top-scoring documents are collected
        in the sample processed on each shard. Defaults to `100` if
        omitted.
    :arg field: The field used to provide values used for
        de-duplication.
    """

    name = "diversified_sampler"

    def __init__(
        self,
        *,
        execution_hint: Union[
            Literal["map", "global_ordinals", "bytes_hash"], "DefaultType"
        ] = DEFAULT,
        max_docs_per_value: Union[int, "DefaultType"] = DEFAULT,
        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
        shard_size: Union[int, "DefaultType"] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            execution_hint=execution_hint,
            max_docs_per_value=max_docs_per_value,
            script=script,
            shard_size=shard_size,
            field=field,
            **kwargs,
        )


class ExtendedStats(Agg[_R]):
    """
    A multi-value metrics aggregation that computes stats over numeric
    values extracted from the aggregated documents.

    :arg sigma: The number of standard deviations above/below the mean
        to display.
    :arg format:
    :arg field: The field on which to run the aggregation.
    :arg missing: The value to apply to documents that do not have a
        value. By default, documents without a value are ignored.
    :arg script:
    """

    name = "extended_stats"

    def __init__(
        self,
        *,
        sigma: Union[float, "DefaultType"] = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            sigma=sigma,
            format=format,
            field=field,
            missing=missing,
            script=script,
            **kwargs,
        )


class ExtendedStatsBucket(Pipeline[_R]):
    """
    A sibling pipeline aggregation which calculates a variety of stats
    across all buckets of a specified metric in a sibling aggregation.

    :arg sigma: The number of standard deviations above/below the mean
        to display.
    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
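
    Example (an illustrative sketch; the index, field and aggregation
    names are assumptions made for this example)::

        from elasticsearch.dsl import Search

        s = Search(index="sales")
        s.aggs.bucket(
            "sales_per_month", "date_histogram", field="date", calendar_interval="month"
        ).metric("total_sales", "sum", field="price")
        # Sibling pipeline: extended stats over all monthly totals.
        s.aggs.pipeline(
            "stats_monthly_sales",
            "extended_stats_bucket",
            buckets_path="sales_per_month>total_sales",
        )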
""" name = "extended_stats_bucket" def __init__( self, *, sigma: Union[float, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( sigma=sigma, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class FrequentItemSets(Agg[_R]): """ A bucket aggregation which finds frequent item sets, a form of association rules mining that identifies items that often occur together. :arg fields: (required) Fields to analyze. :arg minimum_set_size: The minimum size of one item set. Defaults to `1` if omitted. :arg minimum_support: The minimum support of one item set. Defaults to `0.1` if omitted. :arg size: The number of top item sets to return. Defaults to `10` if omitted. :arg filter: Query that filters documents from analysis. """ name = "frequent_item_sets" _param_defs = { "filter": {"type": "query"}, } def __init__( self, *, fields: Union[ Sequence["types.FrequentItemSetsField"], Sequence[Dict[str, Any]], "DefaultType", ] = DEFAULT, minimum_set_size: Union[int, "DefaultType"] = DEFAULT, minimum_support: Union[float, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, filter: Union[Query, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( fields=fields, minimum_set_size=minimum_set_size, minimum_support=minimum_support, size=size, filter=filter, **kwargs, ) class Filter(Bucket[_R]): """ A single bucket aggregation that narrows the set of documents to those that match a query. :arg filter: A single bucket aggregation that narrows the set of documents to those that match a query. """ name = "filter" _param_defs = { "filter": {"type": "query"}, "aggs": {"type": "agg", "hash": True}, } def __init__(self, filter: Union[Query, "DefaultType"] = DEFAULT, **kwargs: Any): super().__init__(filter=filter, **kwargs) def to_dict(self) -> Dict[str, Any]: d = super().to_dict() if isinstance(d[self.name], dict): n = cast(AttrDict[Any], d[self.name]) n.update(n.pop("filter", {})) return d class Filters(Bucket[_R]): """ A multi-bucket aggregation where each bucket contains the documents that match a query. :arg filters: Collection of queries from which to build buckets. :arg other_bucket: Set to `true` to add a bucket to the response which will contain all documents that do not match any of the given filters. :arg other_bucket_key: The key with which the other bucket is returned. Defaults to `_other_` if omitted. :arg keyed: By default, the named filters aggregation returns the buckets as an object. Set to `false` to return the buckets as an array of objects. Defaults to `True` if omitted. """ name = "filters" _param_defs = { "filters": {"type": "query", "hash": True}, "aggs": {"type": "agg", "hash": True}, } def __init__( self, *, filters: Union[Dict[str, Query], "DefaultType"] = DEFAULT, other_bucket: Union[bool, "DefaultType"] = DEFAULT, other_bucket_key: Union[str, "DefaultType"] = DEFAULT, keyed: Union[bool, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( filters=filters, other_bucket=other_bucket, other_bucket_key=other_bucket_key, keyed=keyed, **kwargs, ) class GeoBounds(Agg[_R]): """ A metric aggregation that computes the geographic bounding box containing all values for a Geopoint or Geoshape field. :arg wrap_longitude: Specifies whether the bounding box should be allowed to overlap the international date line. 
        Defaults to `True` if omitted.
    :arg field: The field on which to run the aggregation.
    :arg missing: The value to apply to documents that do not have a
        value. By default, documents without a value are ignored.
    :arg script:
    """

    name = "geo_bounds"

    def __init__(
        self,
        *,
        wrap_longitude: Union[bool, "DefaultType"] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            wrap_longitude=wrap_longitude,
            field=field,
            missing=missing,
            script=script,
            **kwargs,
        )


class GeoCentroid(Agg[_R]):
    """
    A metric aggregation that computes the weighted centroid from all
    coordinate values for geo fields.

    :arg count:
    :arg location:
    :arg field: The field on which to run the aggregation.
    :arg missing: The value to apply to documents that do not have a
        value. By default, documents without a value are ignored.
    :arg script:
    """

    name = "geo_centroid"

    def __init__(
        self,
        *,
        count: Union[int, "DefaultType"] = DEFAULT,
        location: Union[
            "types.LatLonGeoLocation",
            "types.GeoHashLocation",
            Sequence[float],
            str,
            Dict[str, Any],
            "DefaultType",
        ] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            count=count,
            location=location,
            field=field,
            missing=missing,
            script=script,
            **kwargs,
        )


class GeoDistance(Bucket[_R]):
    """
    A multi-bucket aggregation that works on `geo_point` fields.
    Evaluates the distance of each document value from an origin point
    and determines the buckets it belongs to, based on ranges defined in
    the request.

    :arg distance_type: The distance calculation type. Defaults to `arc`
        if omitted.
    :arg field: A field of type `geo_point` used to evaluate the
        distance.
    :arg origin: The origin used to evaluate the distance.
    :arg ranges: An array of ranges used to bucket documents.
    :arg unit: The distance unit. Defaults to `m` if omitted.
    """

    name = "geo_distance"

    def __init__(
        self,
        *,
        distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        origin: Union[
            "types.LatLonGeoLocation",
            "types.GeoHashLocation",
            Sequence[float],
            str,
            Dict[str, Any],
            "DefaultType",
        ] = DEFAULT,
        ranges: Union[
            Sequence["wrappers.AggregationRange"],
            Sequence[Dict[str, Any]],
            "DefaultType",
        ] = DEFAULT,
        unit: Union[
            Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            distance_type=distance_type,
            field=field,
            origin=origin,
            ranges=ranges,
            unit=unit,
            **kwargs,
        )


class GeohashGrid(Bucket[_R]):
    """
    A multi-bucket aggregation that groups `geo_point` and `geo_shape`
    values into buckets that represent a grid. Each cell is labeled
    using a geohash which is of user-definable precision.

    :arg bounds: The bounding box to filter the points in each bucket.
    :arg field: Field containing indexed `geo_point` or `geo_shape`
        values. If the field contains an array, `geohash_grid`
        aggregates all array values.
    :arg precision: The string length of the geohashes used to define
        cells/buckets in the results. Defaults to `5` if omitted.
    :arg shard_size: Allows for more accurate counting of the top cells
        returned in the final result of the aggregation. Defaults to
        returning `max(10,(size x number-of-shards))` buckets from each
        shard.
    :arg size: The maximum number of geohash buckets to return. Defaults
        to `10000` if omitted.
    """

    name = "geohash_grid"

    def __init__(
        self,
        *,
        bounds: Union[
            "types.CoordsGeoBounds",
            "types.TopLeftBottomRightGeoBounds",
            "types.TopRightBottomLeftGeoBounds",
            "types.WktGeoBounds",
            Dict[str, Any],
            "DefaultType",
        ] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        precision: Union[float, str, "DefaultType"] = DEFAULT,
        shard_size: Union[int, "DefaultType"] = DEFAULT,
        size: Union[int, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            bounds=bounds,
            field=field,
            precision=precision,
            shard_size=shard_size,
            size=size,
            **kwargs,
        )


class GeoLine(Agg[_R]):
    """
    Aggregates all `geo_point` values within a bucket into a
    `LineString` ordered by the chosen sort field.

    :arg point: (required) The name of the geo_point field.
    :arg sort: (required) The name of the numeric field to use as the
        sort key for ordering the points. When the `geo_line`
        aggregation is nested inside a `time_series` aggregation, this
        field defaults to `@timestamp`, and any other value will result
        in error.
    :arg include_sort: When `true`, returns an additional array of the
        sort values in the feature properties.
    :arg sort_order: The order in which the line is sorted (ascending or
        descending). Defaults to `asc` if omitted.
    :arg size: The maximum length of the line represented in the
        aggregation. Valid sizes are between 1 and 10000. Defaults to
        `10000` if omitted.
    """

    name = "geo_line"

    def __init__(
        self,
        *,
        point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT,
        sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT,
        include_sort: Union[bool, "DefaultType"] = DEFAULT,
        sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT,
        size: Union[int, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            point=point,
            sort=sort,
            include_sort=include_sort,
            sort_order=sort_order,
            size=size,
            **kwargs,
        )


class GeotileGrid(Bucket[_R]):
    """
    A multi-bucket aggregation that groups `geo_point` and `geo_shape`
    values into buckets that represent a grid. Each cell corresponds to
    a map tile as used by many online map sites.

    :arg field: Field containing indexed `geo_point` or `geo_shape`
        values. If the field contains an array, `geotile_grid`
        aggregates all array values.
    :arg precision: Integer zoom of the key used to define cells/buckets
        in the results. Values outside of the range [0,29] will be
        rejected. Defaults to `7` if omitted.
    :arg shard_size: Allows for more accurate counting of the top cells
        returned in the final result of the aggregation. Defaults to
        returning `max(10,(size x number-of-shards))` buckets from each
        shard.
    :arg size: The maximum number of buckets to return. Defaults to
        `10000` if omitted.
    :arg bounds: A bounding box to filter the geo-points or geo-shapes
        in each bucket.
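
    Example (an illustrative sketch; the index and field names are
    assumptions made for this example)::

        from elasticsearch.dsl import Search

        s = Search(index="museums")
        # Zoom level 8 map tiles, returning at most 1000 cells.
        s.aggs.bucket(
            "tiles", "geotile_grid", field="location", precision=8, size=1000
        )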
""" name = "geotile_grid" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, precision: Union[float, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, bounds: Union[ "types.CoordsGeoBounds", "types.TopLeftBottomRightGeoBounds", "types.TopRightBottomLeftGeoBounds", "types.WktGeoBounds", Dict[str, Any], "DefaultType", ] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, precision=precision, shard_size=shard_size, size=size, bounds=bounds, **kwargs, ) class GeohexGrid(Bucket[_R]): """ A multi-bucket aggregation that groups `geo_point` and `geo_shape` values into buckets that represent a grid. Each cell corresponds to a H3 cell index and is labeled using the H3Index representation. :arg field: (required) Field containing indexed `geo_point` or `geo_shape` values. If the field contains an array, `geohex_grid` aggregates all array values. :arg precision: Integer zoom of the key used to defined cells or buckets in the results. Value should be between 0-15. Defaults to `6` if omitted. :arg bounds: Bounding box used to filter the geo-points in each bucket. :arg size: Maximum number of buckets to return. Defaults to `10000` if omitted. :arg shard_size: Number of buckets returned from each shard. """ name = "geohex_grid" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, precision: Union[int, "DefaultType"] = DEFAULT, bounds: Union[ "types.CoordsGeoBounds", "types.TopLeftBottomRightGeoBounds", "types.TopRightBottomLeftGeoBounds", "types.WktGeoBounds", Dict[str, Any], "DefaultType", ] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, precision=precision, bounds=bounds, size=size, shard_size=shard_size, **kwargs, ) class Global(Bucket[_R]): """ Defines a single bucket of all the documents within the search execution context. This context is defined by the indices and the document types you’re searching on, but is not influenced by the search query itself. """ name = "global" def __init__(self, **kwargs: Any): super().__init__(**kwargs) class Histogram(Bucket[_R]): """ A multi-bucket values source based aggregation that can be applied on numeric values or numeric range values extracted from the documents. It dynamically builds fixed size (interval) buckets over the values. :arg extended_bounds: Enables extending the bounds of the histogram beyond the data itself. :arg hard_bounds: Limits the range of buckets in the histogram. It is particularly useful in the case of open data ranges that can result in a very large number of buckets. :arg field: The name of the field to aggregate on. :arg interval: The interval for the buckets. Must be a positive decimal. :arg min_doc_count: Only returns buckets that have `min_doc_count` number of documents. By default, the response will fill gaps in the histogram with empty buckets. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg offset: By default, the bucket keys start with 0 and then continue in even spaced steps of `interval`. The bucket boundaries can be shifted by using the `offset` option. :arg order: The sort order of the returned buckets. By default, the returned buckets are sorted by their key ascending. :arg script: :arg format: :arg keyed: If `true`, returns buckets as a hash instead of an array, keyed by the bucket keys. 
""" name = "histogram" def __init__( self, *, extended_bounds: Union[ "types.ExtendedBounds", Dict[str, Any], "DefaultType" ] = DEFAULT, hard_bounds: Union[ "types.ExtendedBounds", Dict[str, Any], "DefaultType" ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, interval: Union[float, "DefaultType"] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, missing: Union[float, "DefaultType"] = DEFAULT, offset: Union[float, "DefaultType"] = DEFAULT, order: Union[ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], "DefaultType", ] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, keyed: Union[bool, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( extended_bounds=extended_bounds, hard_bounds=hard_bounds, field=field, interval=interval, min_doc_count=min_doc_count, missing=missing, offset=offset, order=order, script=script, format=format, keyed=keyed, **kwargs, ) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return FieldBucketData(self, search, data) class IPRange(Bucket[_R]): """ A multi-bucket value source based aggregation that enables the user to define a set of IP ranges - each representing a bucket. :arg field: The date field whose values are used to build ranges. :arg ranges: Array of IP ranges. """ name = "ip_range" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, ranges: Union[ Sequence["types.IpRangeAggregationRange"], Sequence[Dict[str, Any]], "DefaultType", ] = DEFAULT, **kwargs: Any, ): super().__init__(field=field, ranges=ranges, **kwargs) class IPPrefix(Bucket[_R]): """ A bucket aggregation that groups documents based on the network or sub-network of an IP address. :arg field: (required) The IP address field to aggregation on. The field mapping type must be `ip`. :arg prefix_length: (required) Length of the network prefix. For IPv4 addresses the accepted range is [0, 32]. For IPv6 addresses the accepted range is [0, 128]. :arg is_ipv6: Defines whether the prefix applies to IPv6 addresses. :arg append_prefix_length: Defines whether the prefix length is appended to IP address keys in the response. :arg keyed: Defines whether buckets are returned as a hash rather than an array in the response. :arg min_doc_count: Minimum number of documents in a bucket for it to be included in the response. Defaults to `1` if omitted. """ name = "ip_prefix" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, prefix_length: Union[int, "DefaultType"] = DEFAULT, is_ipv6: Union[bool, "DefaultType"] = DEFAULT, append_prefix_length: Union[bool, "DefaultType"] = DEFAULT, keyed: Union[bool, "DefaultType"] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, prefix_length=prefix_length, is_ipv6=is_ipv6, append_prefix_length=append_prefix_length, keyed=keyed, min_doc_count=min_doc_count, **kwargs, ) class Inference(Pipeline[_R]): """ A parent pipeline aggregation which loads a pre-trained model and performs inference on the collated result fields from the parent bucket aggregation. :arg model_id: (required) The ID or alias for the trained model. :arg inference_config: Contains the inference type and its options. :arg format: `DecimalFormat` pattern for the output value. 
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
    """

    name = "inference"

    def __init__(
        self,
        *,
        model_id: Union[str, "DefaultType"] = DEFAULT,
        inference_config: Union[
            "types.InferenceConfigContainer", Dict[str, Any], "DefaultType"
        ] = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        gap_policy: Union[
            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
        ] = DEFAULT,
        buckets_path: Union[
            str, Sequence[str], Mapping[str, str], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            model_id=model_id,
            inference_config=inference_config,
            format=format,
            gap_policy=gap_policy,
            buckets_path=buckets_path,
            **kwargs,
        )


class Line(Agg[_R]):
    """
    :arg point: (required) The name of the geo_point field.
    :arg sort: (required) The name of the numeric field to use as the
        sort key for ordering the points. When the `geo_line`
        aggregation is nested inside a `time_series` aggregation, this
        field defaults to `@timestamp`, and any other value will result
        in error.
    :arg include_sort: When `true`, returns an additional array of the
        sort values in the feature properties.
    :arg sort_order: The order in which the line is sorted (ascending or
        descending). Defaults to `asc` if omitted.
    :arg size: The maximum length of the line represented in the
        aggregation. Valid sizes are between 1 and 10000. Defaults to
        `10000` if omitted.
    """

    name = "line"

    def __init__(
        self,
        *,
        point: Union["types.GeoLinePoint", Dict[str, Any], "DefaultType"] = DEFAULT,
        sort: Union["types.GeoLineSort", Dict[str, Any], "DefaultType"] = DEFAULT,
        include_sort: Union[bool, "DefaultType"] = DEFAULT,
        sort_order: Union[Literal["asc", "desc"], "DefaultType"] = DEFAULT,
        size: Union[int, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            point=point,
            sort=sort,
            include_sort=include_sort,
            sort_order=sort_order,
            size=size,
            **kwargs,
        )


class MatrixStats(Agg[_R]):
    """
    A numeric aggregation that computes the following statistics over a
    set of document fields: `count`, `mean`, `variance`, `skewness`,
    `kurtosis`, `covariance`, and `correlation`.

    :arg mode: Array value the aggregation will use for array or multi-
        valued fields. Defaults to `avg` if omitted.
    :arg fields: An array of fields for computing the statistics.
    :arg missing: The value to apply to documents that do not have a
        value. By default, documents without a value are ignored.
    """

    name = "matrix_stats"

    def __init__(
        self,
        *,
        mode: Union[
            Literal["min", "max", "sum", "avg", "median"], "DefaultType"
        ] = DEFAULT,
        fields: Union[
            Union[str, "InstrumentedField"],
            Sequence[Union[str, "InstrumentedField"]],
            "DefaultType",
        ] = DEFAULT,
        missing: Union[
            Mapping[Union[str, "InstrumentedField"], float], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(mode=mode, fields=fields, missing=missing, **kwargs)


class Max(Agg[_R]):
    """
    A single-value metrics aggregation that returns the maximum value
    among the numeric values extracted from the aggregated documents.

    :arg format:
    :arg field: The field on which to run the aggregation.
    :arg missing: The value to apply to documents that do not have a
        value. By default, documents without a value are ignored.
:arg script: """ name = "max" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, field=field, missing=missing, script=script, **kwargs ) class MaxBucket(Pipeline[_R]): """ A sibling pipeline aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation and outputs both the value and the key(s) of the bucket(s). :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "max_bucket" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs ) class MedianAbsoluteDeviation(Agg[_R]): """ A single-value aggregation that approximates the median absolute deviation of its search results. :arg compression: Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. Defaults to `1000` if omitted. :arg execution_hint: The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. Defaults to `default` if omitted. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "median_absolute_deviation" def __init__( self, *, compression: Union[float, "DefaultType"] = DEFAULT, execution_hint: Union[ Literal["default", "high_accuracy"], "DefaultType" ] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( compression=compression, execution_hint=execution_hint, format=format, field=field, missing=missing, script=script, **kwargs, ) class Min(Agg[_R]): """ A single-value metrics aggregation that returns the minimum value among numeric values extracted from the aggregated documents. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. 
    :arg script:
    """

    name = "min"

    def __init__(
        self,
        *,
        format: Union[str, "DefaultType"] = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
        script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            format=format, field=field, missing=missing, script=script, **kwargs
        )


class MinBucket(Pipeline[_R]):
    """
    A sibling pipeline aggregation which identifies the bucket(s) with
    the minimum value of a specified metric in a sibling aggregation and
    outputs both the value and the key(s) of the bucket(s).

    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
    """

    name = "min_bucket"

    def __init__(
        self,
        *,
        format: Union[str, "DefaultType"] = DEFAULT,
        gap_policy: Union[
            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
        ] = DEFAULT,
        buckets_path: Union[
            str, Sequence[str], Mapping[str, str], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs
        )


class Missing(Bucket[_R]):
    """
    A field data based single bucket aggregation that creates a bucket
    of all documents in the current document set context that are
    missing a field value (effectively, missing a field or having the
    configured NULL value set).

    :arg field: The name of the field.
    :arg missing:
    """

    name = "missing"

    def __init__(
        self,
        *,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(field=field, missing=missing, **kwargs)


class MovingAvg(Pipeline[_R]):
    """ """

    name = "moving_avg"

    def __init__(self, **kwargs: Any):
        super().__init__(**kwargs)


class LinearMovingAverageAggregation(MovingAvg[_R]):
    """
    :arg model: (required)
    :arg settings: (required)
    :arg minimize:
    :arg predict:
    :arg window:
    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
    """

    def __init__(
        self,
        *,
        model: Any = DEFAULT,
        settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT,
        minimize: Union[bool, "DefaultType"] = DEFAULT,
        predict: Union[int, "DefaultType"] = DEFAULT,
        window: Union[int, "DefaultType"] = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        gap_policy: Union[
            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
        ] = DEFAULT,
        buckets_path: Union[
            str, Sequence[str], Mapping[str, str], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            model=model,
            settings=settings,
            minimize=minimize,
            predict=predict,
            window=window,
            format=format,
            gap_policy=gap_policy,
            buckets_path=buckets_path,
            **kwargs,
        )


class SimpleMovingAverageAggregation(MovingAvg[_R]):
    """
    :arg model: (required)
    :arg settings: (required)
    :arg minimize:
    :arg predict:
    :arg window:
    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
:arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ def __init__( self, *, model: Any = DEFAULT, settings: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, minimize: Union[bool, "DefaultType"] = DEFAULT, predict: Union[int, "DefaultType"] = DEFAULT, window: Union[int, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( model=model, settings=settings, minimize=minimize, predict=predict, window=window, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class EwmaMovingAverageAggregation(MovingAvg[_R]): """ :arg model: (required) :arg settings: (required) :arg minimize: :arg predict: :arg window: :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ def __init__( self, *, model: Any = DEFAULT, settings: Union[ "types.EwmaModelSettings", Dict[str, Any], "DefaultType" ] = DEFAULT, minimize: Union[bool, "DefaultType"] = DEFAULT, predict: Union[int, "DefaultType"] = DEFAULT, window: Union[int, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( model=model, settings=settings, minimize=minimize, predict=predict, window=window, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class HoltMovingAverageAggregation(MovingAvg[_R]): """ :arg model: (required) :arg settings: (required) :arg minimize: :arg predict: :arg window: :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ def __init__( self, *, model: Any = DEFAULT, settings: Union[ "types.HoltLinearModelSettings", Dict[str, Any], "DefaultType" ] = DEFAULT, minimize: Union[bool, "DefaultType"] = DEFAULT, predict: Union[int, "DefaultType"] = DEFAULT, window: Union[int, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( model=model, settings=settings, minimize=minimize, predict=predict, window=window, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class HoltWintersMovingAverageAggregation(MovingAvg[_R]): """ :arg model: (required) :arg settings: (required) :arg minimize: :arg predict: :arg window: :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. 
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
    """

    def __init__(
        self,
        *,
        model: Any = DEFAULT,
        settings: Union[
            "types.HoltWintersModelSettings", Dict[str, Any], "DefaultType"
        ] = DEFAULT,
        minimize: Union[bool, "DefaultType"] = DEFAULT,
        predict: Union[int, "DefaultType"] = DEFAULT,
        window: Union[int, "DefaultType"] = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        gap_policy: Union[
            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
        ] = DEFAULT,
        buckets_path: Union[
            str, Sequence[str], Mapping[str, str], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            model=model,
            settings=settings,
            minimize=minimize,
            predict=predict,
            window=window,
            format=format,
            gap_policy=gap_policy,
            buckets_path=buckets_path,
            **kwargs,
        )


class MovingPercentiles(Pipeline[_R]):
    """
    Given an ordered series of percentiles, "slides" a window across
    those percentiles and computes cumulative percentiles.

    :arg window: The size of the window to "slide" across the histogram.
    :arg shift: By default, the window consists of the last n values
        excluding the current bucket. Increasing `shift` by 1 moves the
        starting window position by 1 to the right.
    :arg keyed:
    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
    """

    name = "moving_percentiles"

    def __init__(
        self,
        *,
        window: Union[int, "DefaultType"] = DEFAULT,
        shift: Union[int, "DefaultType"] = DEFAULT,
        keyed: Union[bool, "DefaultType"] = DEFAULT,
        format: Union[str, "DefaultType"] = DEFAULT,
        gap_policy: Union[
            Literal["skip", "insert_zeros", "keep_values"], "DefaultType"
        ] = DEFAULT,
        buckets_path: Union[
            str, Sequence[str], Mapping[str, str], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            window=window,
            shift=shift,
            keyed=keyed,
            format=format,
            gap_policy=gap_policy,
            buckets_path=buckets_path,
            **kwargs,
        )


class MovingFn(Pipeline[_R]):
    """
    Given an ordered series of data, "slides" a window across the data
    and runs a custom script on each window of data. For convenience, a
    number of common functions are predefined such as `min`, `max`, and
    moving averages.

    :arg script: The script that should be executed on each window of
        data.
    :arg shift: By default, the window consists of the last n values
        excluding the current bucket. Increasing `shift` by 1 moves the
        starting window position by 1 to the right.
    :arg window: The size of the window to "slide" across the histogram.
    :arg format: `DecimalFormat` pattern for the output value. If
        specified, the formatted value is returned in the aggregation’s
        `value_as_string` property.
    :arg gap_policy: Policy to apply when gaps are found in the data.
        Defaults to `skip` if omitted.
    :arg buckets_path: Path to the buckets that contain one set of
        values to correlate.
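
    Example (an illustrative sketch; the index, field and aggregation
    names are assumptions made for this example, while
    `MovingFunctions.unweightedAvg` is one of the predefined functions)::

        from elasticsearch.dsl import Search

        s = Search(index="sales")
        s.aggs.bucket(
            "sales_per_month", "date_histogram", field="date", calendar_interval="month"
        ).metric("total_sales", "sum", field="price")
        # Ten-bucket moving average of the monthly totals.
        s.aggs["sales_per_month"].pipeline(
            "moving_avg_sales",
            "moving_fn",
            buckets_path="total_sales",
            window=10,
            script="MovingFunctions.unweightedAvg(values)",
        )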
""" name = "moving_fn" def __init__( self, *, script: Union[str, "DefaultType"] = DEFAULT, shift: Union[int, "DefaultType"] = DEFAULT, window: Union[int, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( script=script, shift=shift, window=window, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class MultiTerms(Bucket[_R]): """ A multi-bucket value source based aggregation where buckets are dynamically built - one per unique set of values. :arg terms: (required) The field from which to generate sets of terms. :arg collect_mode: Specifies the strategy for data collection. Defaults to `breadth_first` if omitted. :arg order: Specifies the sort order of the buckets. Defaults to sorting by descending document count. :arg min_doc_count: The minimum number of documents in a bucket for it to be returned. Defaults to `1` if omitted. :arg shard_min_doc_count: The minimum number of documents in a bucket on each shard for it to be returned. Defaults to `1` if omitted. :arg shard_size: The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. :arg show_term_doc_count_error: Calculates the doc count error on per term basis. :arg size: The number of term buckets should be returned out of the overall terms list. Defaults to `10` if omitted. """ name = "multi_terms" def __init__( self, *, terms: Union[ Sequence["types.MultiTermLookup"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, collect_mode: Union[ Literal["depth_first", "breadth_first"], "DefaultType" ] = DEFAULT, order: Union[ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], "DefaultType", ] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( terms=terms, collect_mode=collect_mode, order=order, min_doc_count=min_doc_count, shard_min_doc_count=shard_min_doc_count, shard_size=shard_size, show_term_doc_count_error=show_term_doc_count_error, size=size, **kwargs, ) class Nested(Bucket[_R]): """ A special single bucket aggregation that enables aggregating nested documents. :arg path: The path to the field of type `nested`. """ name = "nested" def __init__( self, path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(path=path, **kwargs) class Normalize(Pipeline[_R]): """ A parent pipeline aggregation which calculates the specific normalized/rescaled value for a specific bucket value. :arg method: The specific method to apply. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. 
""" name = "normalize" def __init__( self, *, method: Union[ Literal[ "rescale_0_1", "rescale_0_100", "percent_of_sum", "mean", "z-score", "softmax", ], "DefaultType", ] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( method=method, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class Parent(Bucket[_R]): """ A special single bucket aggregation that selects parent documents that have the specified type, as defined in a `join` field. :arg type: The child type that should be selected. """ name = "parent" def __init__(self, type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any): super().__init__(type=type, **kwargs) class PercentileRanks(Agg[_R]): """ A multi-value metrics aggregation that calculates one or more percentile ranks over numeric values extracted from the aggregated documents. :arg keyed: By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Set to `false` to disable this behavior. Defaults to `True` if omitted. :arg values: An array of values for which to calculate the percentile ranks. :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm to calculate percentile ranks. :arg tdigest: Sets parameters for the default TDigest algorithm used to calculate percentile ranks. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "percentile_ranks" def __init__( self, *, keyed: Union[bool, "DefaultType"] = DEFAULT, values: Union[Sequence[float], None, "DefaultType"] = DEFAULT, hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT, tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( keyed=keyed, values=values, hdr=hdr, tdigest=tdigest, format=format, field=field, missing=missing, script=script, **kwargs, ) class Percentiles(Agg[_R]): """ A multi-value metrics aggregation that calculates one or more percentiles over numeric values extracted from the aggregated documents. :arg keyed: By default, the aggregation associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Set to `false` to disable this behavior. Defaults to `True` if omitted. :arg percents: The percentiles to calculate. :arg hdr: Uses the alternative High Dynamic Range Histogram algorithm to calculate percentiles. :arg tdigest: Sets parameters for the default TDigest algorithm used to calculate percentiles. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. 
:arg script: """ name = "percentiles" def __init__( self, *, keyed: Union[bool, "DefaultType"] = DEFAULT, percents: Union[Sequence[float], "DefaultType"] = DEFAULT, hdr: Union["types.HdrMethod", Dict[str, Any], "DefaultType"] = DEFAULT, tdigest: Union["types.TDigest", Dict[str, Any], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( keyed=keyed, percents=percents, hdr=hdr, tdigest=tdigest, format=format, field=field, missing=missing, script=script, **kwargs, ) class PercentilesBucket(Pipeline[_R]): """ A sibling pipeline aggregation which calculates percentiles across all bucket of a specified metric in a sibling aggregation. :arg percents: The list of percentiles to calculate. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "percentiles_bucket" def __init__( self, *, percents: Union[Sequence[float], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( percents=percents, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class Range(Bucket[_R]): """ A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. :arg field: The date field whose values are use to build ranges. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg ranges: An array of ranges used to bucket documents. :arg script: :arg keyed: Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array. :arg format: """ name = "range" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[int, "DefaultType"] = DEFAULT, ranges: Union[ Sequence["wrappers.AggregationRange"], Sequence[Dict[str, Any]], "DefaultType", ] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, keyed: Union[bool, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, missing=missing, ranges=ranges, script=script, keyed=keyed, format=format, **kwargs, ) class RareTerms(Bucket[_R]): """ A multi-bucket value source based aggregation which finds "rare" terms — terms that are at the long-tail of the distribution and are not frequent. :arg exclude: Terms that should be excluded from the aggregation. :arg field: The field from which to return rare terms. :arg include: Terms that should be included in the aggregation. :arg max_doc_count: The maximum number of documents a term should appear in. Defaults to `1` if omitted. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg precision: The precision of the internal CuckooFilters. 
Smaller precision leads to better approximation, but higher memory usage. Defaults to `0.001` if omitted. :arg value_type: """ name = "rare_terms" def __init__( self, *, exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, include: Union[ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" ] = DEFAULT, max_doc_count: Union[int, "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, precision: Union[float, "DefaultType"] = DEFAULT, value_type: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( exclude=exclude, field=field, include=include, max_doc_count=max_doc_count, missing=missing, precision=precision, value_type=value_type, **kwargs, ) class Rate(Agg[_R]): """ Calculates a rate of documents or a field in each bucket. Can only be used inside a `date_histogram` or `composite` aggregation. :arg unit: The interval used to calculate the rate. By default, the interval of the `date_histogram` is used. :arg mode: How the rate is calculated. Defaults to `sum` if omitted. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "rate" def __init__( self, *, unit: Union[ Literal[ "second", "minute", "hour", "day", "week", "month", "quarter", "year" ], "DefaultType", ] = DEFAULT, mode: Union[Literal["sum", "value_count"], "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( unit=unit, mode=mode, format=format, field=field, missing=missing, script=script, **kwargs, ) class ReverseNested(Bucket[_R]): """ A special single bucket aggregation that enables aggregating on parent documents from nested documents. Should only be defined inside a `nested` aggregation. :arg path: Defines the nested object field that should be joined back to. The default is empty, which means that it joins back to the root/main document level. """ name = "reverse_nested" def __init__( self, path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(path=path, **kwargs) class RandomSampler(Bucket[_R]): """ A single bucket aggregation that randomly includes documents in the aggregated results. Sampling provides significant speed improvement at the cost of accuracy. :arg probability: (required) The probability that a document will be included in the aggregated data. Must be greater than 0, less than 0.5, or exactly 1. The lower the probability, the fewer documents are matched. :arg seed: The seed to generate the random sampling of documents. When a seed is provided, the random subset of documents is the same between calls. :arg shard_seed: When combined with seed, setting shard_seed ensures 100% consistent sampling over shards where data is exactly the same. 
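Example (an illustrative sketch; assumes an existing ``Search`` instance
``s`` and a hypothetical ``price`` field)::

    s.aggs.bucket("sampled", "random_sampler", probability=0.1, seed=42)
    s.aggs["sampled"].metric("avg_price", "avg", field="price")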
""" name = "random_sampler" def __init__( self, *, probability: Union[float, "DefaultType"] = DEFAULT, seed: Union[int, "DefaultType"] = DEFAULT, shard_seed: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( probability=probability, seed=seed, shard_seed=shard_seed, **kwargs ) class Sampler(Bucket[_R]): """ A filtering aggregation used to limit any sub aggregations' processing to a sample of the top-scoring documents. :arg shard_size: Limits how many top-scoring documents are collected in the sample processed on each shard. Defaults to `100` if omitted. """ name = "sampler" def __init__(self, shard_size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any): super().__init__(shard_size=shard_size, **kwargs) class ScriptedMetric(Agg[_R]): """ A metric aggregation that uses scripts to provide a metric output. :arg combine_script: Runs once on each shard after document collection is complete. Allows the aggregation to consolidate the state returned from each shard. :arg init_script: Runs prior to any collection of documents. Allows the aggregation to set up any initial state. :arg map_script: Run once per document collected. If no `combine_script` is specified, the resulting state needs to be stored in the `state` object. :arg params: A global object with script parameters for `init`, `map` and `combine` scripts. It is shared between the scripts. :arg reduce_script: Runs once on the coordinating node after all shards have returned their results. The script is provided with access to a variable `states`, which is an array of the result of the `combine_script` on each shard. :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "scripted_metric" def __init__( self, *, combine_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, init_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, map_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, params: Union[Mapping[str, Any], "DefaultType"] = DEFAULT, reduce_script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( combine_script=combine_script, init_script=init_script, map_script=map_script, params=params, reduce_script=reduce_script, field=field, missing=missing, script=script, **kwargs, ) class SerialDiff(Pipeline[_R]): """ An aggregation that subtracts values in a time series from themselves at different time lags or periods. :arg lag: The historical bucket to subtract from the current value. Must be a positive, non-zero integer. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. 
""" name = "serial_diff" def __init__( self, *, lag: Union[int, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( lag=lag, format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs, ) class SignificantTerms(Bucket[_R]): """ Returns interesting or unusual occurrences of terms in a set. :arg background_filter: A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. :arg chi_square: Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. :arg exclude: Terms to exclude. :arg execution_hint: Mechanism by which the aggregation should be executed: using field values directly or using global ordinals. :arg field: The field from which to return significant terms. :arg gnd: Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. :arg include: Terms to include. :arg jlh: Use JLH score as the significance score. :arg min_doc_count: Only return terms that are found in more than `min_doc_count` hits. Defaults to `3` if omitted. :arg mutual_information: Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. :arg percentage: A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. :arg script_heuristic: Customized score, implemented via a script. :arg shard_min_doc_count: Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. :arg shard_size: Can be used to control the volumes of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. :arg size: The number of buckets returned out of the overall terms list. 
""" name = "significant_terms" _param_defs = { "background_filter": {"type": "query"}, } def __init__( self, *, background_filter: Union[Query, "DefaultType"] = DEFAULT, chi_square: Union[ "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, execution_hint: Union[ Literal[ "map", "global_ordinals", "global_ordinals_hash", "global_ordinals_low_cardinality", ], "DefaultType", ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, gnd: Union[ "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, include: Union[ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" ] = DEFAULT, jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, mutual_information: Union[ "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, percentage: Union[ "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, script_heuristic: Union[ "types.ScriptedHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( background_filter=background_filter, chi_square=chi_square, exclude=exclude, execution_hint=execution_hint, field=field, gnd=gnd, include=include, jlh=jlh, min_doc_count=min_doc_count, mutual_information=mutual_information, percentage=percentage, script_heuristic=script_heuristic, shard_min_doc_count=shard_min_doc_count, shard_size=shard_size, size=size, **kwargs, ) class SignificantText(Bucket[_R]): """ Returns interesting or unusual occurrences of free-text terms in a set. :arg background_filter: A background filter that can be used to focus in on significant terms within a narrower context, instead of the entire index. :arg chi_square: Use Chi square, as described in "Information Retrieval", Manning et al., Chapter 13.5.2, as the significance score. :arg exclude: Values to exclude. :arg execution_hint: Determines whether the aggregation will use field values directly or global ordinals. :arg field: The field from which to return significant text. :arg filter_duplicate_text: Whether to out duplicate text to deal with noisy data. :arg gnd: Use Google normalized distance as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, 2007, as the significance score. :arg include: Values to include. :arg jlh: Use JLH score as the significance score. :arg min_doc_count: Only return values that are found in more than `min_doc_count` hits. Defaults to `3` if omitted. :arg mutual_information: Use mutual information as described in "Information Retrieval", Manning et al., Chapter 13.5.1, as the significance score. :arg percentage: A simple calculation of the number of documents in the foreground sample with a term divided by the number of documents in the background with the term. :arg script_heuristic: Customized score, implemented via a script. :arg shard_min_doc_count: Regulates the certainty a shard has if the values should actually be added to the candidate list or not with respect to the min_doc_count. Values will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. :arg shard_size: The number of candidate terms produced by each shard. 
By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. :arg size: The number of buckets returned out of the overall terms list. :arg source_fields: Overrides the JSON `_source` fields from which text will be analyzed. """ name = "significant_text" _param_defs = { "background_filter": {"type": "query"}, } def __init__( self, *, background_filter: Union[Query, "DefaultType"] = DEFAULT, chi_square: Union[ "types.ChiSquareHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, execution_hint: Union[ Literal[ "map", "global_ordinals", "global_ordinals_hash", "global_ordinals_low_cardinality", ], "DefaultType", ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, filter_duplicate_text: Union[bool, "DefaultType"] = DEFAULT, gnd: Union[ "types.GoogleNormalizedDistanceHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, include: Union[ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" ] = DEFAULT, jlh: Union["types.EmptyObject", Dict[str, Any], "DefaultType"] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, mutual_information: Union[ "types.MutualInformationHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, percentage: Union[ "types.PercentageScoreHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, script_heuristic: Union[ "types.ScriptedHeuristic", Dict[str, Any], "DefaultType" ] = DEFAULT, shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, source_fields: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, **kwargs: Any, ): super().__init__( background_filter=background_filter, chi_square=chi_square, exclude=exclude, execution_hint=execution_hint, field=field, filter_duplicate_text=filter_duplicate_text, gnd=gnd, include=include, jlh=jlh, min_doc_count=min_doc_count, mutual_information=mutual_information, percentage=percentage, script_heuristic=script_heuristic, shard_min_doc_count=shard_min_doc_count, shard_size=shard_size, size=size, source_fields=source_fields, **kwargs, ) class Stats(Agg[_R]): """ A multi-value metrics aggregation that computes stats over numeric values extracted from the aggregated documents. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "stats" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, field=field, missing=missing, script=script, **kwargs ) class StatsBucket(Pipeline[_R]): """ A sibling pipeline aggregation which calculates a variety of stats across all buckets of a specified metric in a sibling aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate.
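Example (an illustrative sketch; aggregation names are hypothetical)::

    s.aggs.bucket("monthly", "date_histogram", field="date", calendar_interval="month")
    s.aggs.pipeline("monthly_stats", "stats_bucket", buckets_path="monthly>_count")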
""" name = "stats_bucket" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs ) class StringStats(Agg[_R]): """ A multi-value metrics aggregation that computes statistics over string values extracted from the aggregated documents. :arg show_distribution: Shows the probability distribution for all characters. :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "string_stats" def __init__( self, *, show_distribution: Union[bool, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( show_distribution=show_distribution, field=field, missing=missing, script=script, **kwargs, ) class Sum(Agg[_R]): """ A single-value metrics aggregation that sums numeric values that are extracted from the aggregated documents. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "sum" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, field=field, missing=missing, script=script, **kwargs ) class SumBucket(Pipeline[_R]): """ A sibling pipeline aggregation which calculates the sum of a specified metric across all buckets in a sibling aggregation. :arg format: `DecimalFormat` pattern for the output value. If specified, the formatted value is returned in the aggregation’s `value_as_string` property. :arg gap_policy: Policy to apply when gaps are found in the data. Defaults to `skip` if omitted. :arg buckets_path: Path to the buckets that contain one set of values to correlate. """ name = "sum_bucket" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, gap_policy: Union[ Literal["skip", "insert_zeros", "keep_values"], "DefaultType" ] = DEFAULT, buckets_path: Union[ str, Sequence[str], Mapping[str, str], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, gap_policy=gap_policy, buckets_path=buckets_path, **kwargs ) class Terms(Bucket[_R]): """ A multi-bucket value source based aggregation where buckets are dynamically built - one per unique value. :arg collect_mode: Determines how child aggregations should be calculated: breadth-first or depth-first. :arg exclude: Values to exclude. Accepts regular expressions and partitions. :arg execution_hint: Determines whether the aggregation will use field values directly or global ordinals. :arg field: The field from which to return terms. :arg include: Values to include. Accepts regular expressions and partitions. :arg min_doc_count: Only return values that are found in more than `min_doc_count` hits. Defaults to `1` if omitted. 
:arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg missing_order: :arg missing_bucket: :arg value_type: Coerce unmapped fields into the specified type. :arg order: Specifies the sort order of the buckets. Defaults to sorting by descending document count. :arg script: :arg shard_min_doc_count: Regulates the certainty a shard has if the term should actually be added to the candidate list or not with respect to the `min_doc_count`. Terms will only be considered if their local shard frequency within the set is higher than the `shard_min_doc_count`. :arg shard_size: The number of candidate terms produced by each shard. By default, `shard_size` will be automatically estimated based on the number of shards and the `size` parameter. :arg show_term_doc_count_error: Set to `true` to return the `doc_count_error_upper_bound`, which is an upper bound to the error on the `doc_count` returned by each shard. :arg size: The number of buckets returned out of the overall terms list. Defaults to `10` if omitted. :arg format: """ name = "terms" def __init__( self, *, collect_mode: Union[ Literal["depth_first", "breadth_first"], "DefaultType" ] = DEFAULT, exclude: Union[str, Sequence[str], "DefaultType"] = DEFAULT, execution_hint: Union[ Literal[ "map", "global_ordinals", "global_ordinals_hash", "global_ordinals_low_cardinality", ], "DefaultType", ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, include: Union[ str, Sequence[str], "types.TermsPartition", Dict[str, Any], "DefaultType" ] = DEFAULT, min_doc_count: Union[int, "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, missing_order: Union[ Literal["first", "last", "default"], "DefaultType" ] = DEFAULT, missing_bucket: Union[bool, "DefaultType"] = DEFAULT, value_type: Union[str, "DefaultType"] = DEFAULT, order: Union[ Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]], Sequence[Mapping[Union[str, "InstrumentedField"], Literal["asc", "desc"]]], "DefaultType", ] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, shard_min_doc_count: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, show_term_doc_count_error: Union[bool, "DefaultType"] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( collect_mode=collect_mode, exclude=exclude, execution_hint=execution_hint, field=field, include=include, min_doc_count=min_doc_count, missing=missing, missing_order=missing_order, missing_bucket=missing_bucket, value_type=value_type, order=order, script=script, shard_min_doc_count=shard_min_doc_count, shard_size=shard_size, show_term_doc_count_error=show_term_doc_count_error, size=size, format=format, **kwargs, ) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return FieldBucketData(self, search, data) class TimeSeries(Bucket[_R]): """ The time series aggregation queries data created using a time series index. This is typically data such as metrics or other data streams with a time component, and requires creating an index using the time series mode. :arg size: The maximum number of results to return. Defaults to `10000` if omitted. :arg keyed: Set to `true` to associate a unique string key with each bucket and return the ranges as a hash rather than an array.
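Example (an illustrative sketch; assumes ``metrics-cpu`` is a
hypothetical index or data stream created in time series mode)::

    s = Search(index="metrics-cpu")
    s.aggs.bucket("ts", "time_series", size=100)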
""" name = "time_series" def __init__( self, *, size: Union[int, "DefaultType"] = DEFAULT, keyed: Union[bool, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(size=size, keyed=keyed, **kwargs) class TopHits(Agg[_R]): """ A metric aggregation that returns the top matching documents per bucket. :arg docvalue_fields: Fields for which to return doc values. :arg explain: If `true`, returns detailed information about score computation as part of a hit. :arg fields: Array of wildcard (*) patterns. The request returns values for field names matching these patterns in the hits.fields property of the response. :arg from: Starting document offset. :arg highlight: Specifies the highlighter to use for retrieving highlighted snippets from one or more fields in the search results. :arg script_fields: Returns the result of one or more script evaluations for each hit. :arg size: The maximum number of top matching hits to return per bucket. Defaults to `3` if omitted. :arg sort: Sort order of the top matching hits. By default, the hits are sorted by the score of the main query. :arg _source: Selects the fields of the source that are returned. :arg stored_fields: Returns values for the specified stored fields (fields that use the `store` mapping option). :arg track_scores: If `true`, calculates and returns document scores, even if the scores are not used for sorting. :arg version: If `true`, returns document version as part of a hit. :arg seq_no_primary_term: If `true`, returns sequence number and primary term of the last modification of each hit. :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "top_hits" def __init__( self, *, docvalue_fields: Union[ Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, explain: Union[bool, "DefaultType"] = DEFAULT, fields: Union[ Sequence["types.FieldAndFormat"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, from_: Union[int, "DefaultType"] = DEFAULT, highlight: Union["types.Highlight", Dict[str, Any], "DefaultType"] = DEFAULT, script_fields: Union[ Mapping[str, "types.ScriptField"], Dict[str, Any], "DefaultType" ] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, sort: Union[ Union[Union[str, "InstrumentedField"], "types.SortOptions"], Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]], Dict[str, Any], "DefaultType", ] = DEFAULT, _source: Union[ bool, "types.SourceFilter", Dict[str, Any], "DefaultType" ] = DEFAULT, stored_fields: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, track_scores: Union[bool, "DefaultType"] = DEFAULT, version: Union[bool, "DefaultType"] = DEFAULT, seq_no_primary_term: Union[bool, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( docvalue_fields=docvalue_fields, explain=explain, fields=fields, from_=from_, highlight=highlight, script_fields=script_fields, size=size, sort=sort, _source=_source, stored_fields=stored_fields, track_scores=track_scores, version=version, seq_no_primary_term=seq_no_primary_term, field=field, missing=missing, script=script, **kwargs, ) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return 
TopHitsData(self, search, data) class TTest(Agg[_R]): """ A metrics aggregation that performs a statistical hypothesis test in which the test statistic follows a Student’s t-distribution under the null hypothesis on numeric values extracted from the aggregated documents. :arg a: Test population A. :arg b: Test population B. :arg type: The type of test. Defaults to `heteroscedastic` if omitted. """ name = "t_test" def __init__( self, *, a: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT, b: Union["types.TestPopulation", Dict[str, Any], "DefaultType"] = DEFAULT, type: Union[ Literal["paired", "homoscedastic", "heteroscedastic"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__(a=a, b=b, type=type, **kwargs) class TopMetrics(Agg[_R]): """ A metric aggregation that selects metrics from the document with the largest or smallest sort value. :arg metrics: The fields of the top document to return. :arg size: The number of top documents from which to return metrics. Defaults to `1` if omitted. :arg sort: The sort order of the documents. :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "top_metrics" def __init__( self, *, metrics: Union[ "types.TopMetricsValue", Sequence["types.TopMetricsValue"], Sequence[Dict[str, Any]], "DefaultType", ] = DEFAULT, size: Union[int, "DefaultType"] = DEFAULT, sort: Union[ Union[Union[str, "InstrumentedField"], "types.SortOptions"], Sequence[Union[Union[str, "InstrumentedField"], "types.SortOptions"]], Dict[str, Any], "DefaultType", ] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( metrics=metrics, size=size, sort=sort, field=field, missing=missing, script=script, **kwargs, ) class ValueCount(Agg[_R]): """ A single-value metrics aggregation that counts the number of values that are extracted from the aggregated documents. :arg format: :arg field: The field on which to run the aggregation. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. :arg script: """ name = "value_count" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, missing: Union[str, int, float, bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, field=field, missing=missing, script=script, **kwargs ) class WeightedAvg(Agg[_R]): """ A single-value metrics aggregation that computes the weighted average of numeric values that are extracted from the aggregated documents. :arg format: A numeric response formatter. :arg value: Configuration for the field that provides the values. :arg value_type: :arg weight: Configuration for the field or script that provides the weights. 
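Example (an illustrative sketch; ``grade`` and ``weight`` are
hypothetical fields)::

    s.aggs.metric(
        "weighted_grade",
        "weighted_avg",
        value={"field": "grade"},
        weight={"field": "weight"},
    )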
""" name = "weighted_avg" def __init__( self, *, format: Union[str, "DefaultType"] = DEFAULT, value: Union[ "types.WeightedAverageValue", Dict[str, Any], "DefaultType" ] = DEFAULT, value_type: Union[ Literal[ "string", "long", "double", "number", "date", "date_nanos", "ip", "numeric", "geo_point", "boolean", ], "DefaultType", ] = DEFAULT, weight: Union[ "types.WeightedAverageValue", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): super().__init__( format=format, value=value, value_type=value_type, weight=weight, **kwargs ) class VariableWidthHistogram(Bucket[_R]): """ A multi-bucket aggregation similar to the histogram, except instead of providing an interval to use as the width of each bucket, a target number of buckets is provided. :arg field: The name of the field. :arg buckets: The target number of buckets. Defaults to `10` if omitted. :arg shard_size: The number of buckets that the coordinating node will request from each shard. Defaults to `buckets * 50`. :arg initial_buffer: Specifies the number of individual documents that will be stored in memory on a shard before the initial bucketing algorithm is run. Defaults to `min(10 * shard_size, 50000)`. :arg script: """ name = "variable_width_histogram" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, buckets: Union[int, "DefaultType"] = DEFAULT, shard_size: Union[int, "DefaultType"] = DEFAULT, initial_buffer: Union[int, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, buckets=buckets, shard_size=shard_size, initial_buffer=initial_buffer, script=script, **kwargs, ) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return FieldBucketData(self, search, data) python-elasticsearch-9.1.1/elasticsearch/dsl/analysis.py000066400000000000000000000241041506101734100234430ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, ClassVar, Dict, List, Optional, Union, cast from . 
import async_connections, connections from .utils import AsyncUsingType, AttrDict, DslBase, UsingType, merge __all__ = ["tokenizer", "analyzer", "char_filter", "token_filter", "normalizer"] class AnalysisBase: @classmethod def _type_shortcut( cls, name_or_instance: Union[str, "AnalysisBase"], type: Optional[str] = None, **kwargs: Any, ) -> DslBase: if isinstance(name_or_instance, cls): if type or kwargs: raise ValueError(f"{cls.__name__}() cannot accept parameters.") return name_or_instance # type: ignore[return-value] if not (type or kwargs): return cls.get_dsl_class("builtin")(name_or_instance) # type: ignore[no-any-return, attr-defined] return cls.get_dsl_class(type, "custom")( # type: ignore[no-any-return, attr-defined] name_or_instance, type or "custom", **kwargs ) class CustomAnalysis: name = "custom" def __init__(self, filter_name: str, builtin_type: str = "custom", **kwargs: Any): self._builtin_type = builtin_type self._name = filter_name super().__init__(**kwargs) def to_dict(self) -> Dict[str, Any]: # only name to present in lists return self._name # type: ignore[return-value] def get_definition(self) -> Dict[str, Any]: d = super().to_dict() # type: ignore[misc] d = d.pop(self.name) d["type"] = self._builtin_type return d # type: ignore[no-any-return] class CustomAnalysisDefinition(CustomAnalysis): _type_name: str _param_defs: ClassVar[Dict[str, Any]] filter: List[Any] char_filter: List[Any] def get_analysis_definition(self) -> Dict[str, Any]: out = {self._type_name: {self._name: self.get_definition()}} t = cast("Tokenizer", getattr(self, "tokenizer", None)) if "tokenizer" in self._param_defs and hasattr(t, "get_definition"): out["tokenizer"] = {t._name: t.get_definition()} filters = { f._name: f.get_definition() for f in self.filter if hasattr(f, "get_definition") } if filters: out["filter"] = filters # any sub filter definitions like multiplexers etc? 
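        # wrapper filters such as ``multiplexer`` and ``condition`` contribute
        # nested filter definitions of their own; merge those into the output
        # so the final index settings include every referenced filter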
for f in self.filter: if hasattr(f, "get_analysis_definition"): d = f.get_analysis_definition() if d: merge(out, d, True) char_filters = { f._name: f.get_definition() for f in self.char_filter if hasattr(f, "get_definition") } if char_filters: out["char_filter"] = char_filters return out class BuiltinAnalysis: name = "builtin" def __init__(self, name: str): self._name = name super().__init__() def to_dict(self) -> Dict[str, Any]: # only name to present in lists return self._name # type: ignore[return-value] class Analyzer(AnalysisBase, DslBase): _type_name = "analyzer" name = "" class BuiltinAnalyzer(BuiltinAnalysis, Analyzer): def get_analysis_definition(self) -> Dict[str, Any]: return {} class CustomAnalyzer(CustomAnalysisDefinition, Analyzer): _param_defs = { "filter": {"type": "token_filter", "multi": True}, "char_filter": {"type": "char_filter", "multi": True}, "tokenizer": {"type": "tokenizer"}, } def _get_body( self, text: str, explain: bool, attributes: Optional[Dict[str, Any]] ) -> Dict[str, Any]: body = {"text": text, "explain": explain} if attributes: body["attributes"] = attributes definition = self.get_analysis_definition() analyzer_def = self.get_definition() for section in ("tokenizer", "char_filter", "filter"): if section not in analyzer_def: continue sec_def = definition.get(section, {}) sec_names = analyzer_def[section] if isinstance(sec_names, str): body[section] = sec_def.get(sec_names, sec_names) else: body[section] = [ sec_def.get(sec_name, sec_name) for sec_name in sec_names ] if self._builtin_type != "custom": body["analyzer"] = self._builtin_type return body def simulate( self, text: str, using: UsingType = "default", explain: bool = False, attributes: Optional[Dict[str, Any]] = None, ) -> AttrDict[Any]: """ Use the Analyze API of elasticsearch to test the outcome of this analyzer. :arg text: Text to be analyzed :arg using: connection alias to use, defaults to ``'default'`` :arg explain: will output all token attributes for each token. You can filter token attributes you want to output by setting ``attributes`` option. :arg attributes: if ``explain`` is specified, filter the token attributes to return. """ es = connections.get_connection(using) return AttrDict( cast( Dict[str, Any], es.indices.analyze(body=self._get_body(text, explain, attributes)), ) ) async def async_simulate( self, text: str, using: AsyncUsingType = "default", explain: bool = False, attributes: Optional[Dict[str, Any]] = None, ) -> AttrDict[Any]: """ Use the Analyze API of elasticsearch to test the outcome of this analyzer. :arg text: Text to be analyzed :arg using: connection alias to use, defaults to ``'default'`` :arg explain: will output all token attributes for each token. You can filter token attributes you want to output by setting ``attributes`` option. :arg attributes: if ``explain`` is specified, filter the token attributes to return. 
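Example (an illustrative sketch; must be awaited inside a coroutine, and
the analyzer shown is hypothetical)::

    my_analyzer = analyzer(
        "my_analyzer", tokenizer="standard", filter=["lowercase"]
    )
    response = await my_analyzer.async_simulate("Quick Brown Fox")
    tokens = [t.token for t in response.tokens]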
""" es = async_connections.get_connection(using) return AttrDict( cast( Dict[str, Any], await es.indices.analyze( body=self._get_body(text, explain, attributes) ), ) ) class Normalizer(AnalysisBase, DslBase): _type_name = "normalizer" name = "" class BuiltinNormalizer(BuiltinAnalysis, Normalizer): def get_analysis_definition(self) -> Dict[str, Any]: return {} class CustomNormalizer(CustomAnalysisDefinition, Normalizer): _param_defs = { "filter": {"type": "token_filter", "multi": True}, "char_filter": {"type": "char_filter", "multi": True}, } class Tokenizer(AnalysisBase, DslBase): _type_name = "tokenizer" name = "" class BuiltinTokenizer(BuiltinAnalysis, Tokenizer): pass class CustomTokenizer(CustomAnalysis, Tokenizer): pass class TokenFilter(AnalysisBase, DslBase): _type_name = "token_filter" name = "" class BuiltinTokenFilter(BuiltinAnalysis, TokenFilter): pass class CustomTokenFilter(CustomAnalysis, TokenFilter): pass class MultiplexerTokenFilter(CustomTokenFilter): name = "multiplexer" def get_definition(self) -> Dict[str, Any]: d = super(CustomTokenFilter, self).get_definition() if "filters" in d: d["filters"] = [ # comma delimited string given by user ( fs if isinstance(fs, str) else # list of strings or TokenFilter objects ", ".join(f.to_dict() if hasattr(f, "to_dict") else f for f in fs) ) for fs in self.filters ] return d def get_analysis_definition(self) -> Dict[str, Any]: if not hasattr(self, "filters"): return {} fs: Dict[str, Any] = {} d = {"filter": fs} for filters in self.filters: if isinstance(filters, str): continue fs.update( { f._name: f.get_definition() for f in filters if hasattr(f, "get_definition") } ) return d class ConditionalTokenFilter(CustomTokenFilter): name = "condition" def get_definition(self) -> Dict[str, Any]: d = super(CustomTokenFilter, self).get_definition() if "filter" in d: d["filter"] = [ f.to_dict() if hasattr(f, "to_dict") else f for f in self.filter ] return d def get_analysis_definition(self) -> Dict[str, Any]: if not hasattr(self, "filter"): return {} return { "filter": { f._name: f.get_definition() for f in self.filter if hasattr(f, "get_definition") } } class CharFilter(AnalysisBase, DslBase): _type_name = "char_filter" name = "" class BuiltinCharFilter(BuiltinAnalysis, CharFilter): pass class CustomCharFilter(CustomAnalysis, CharFilter): pass # shortcuts for direct use analyzer = Analyzer._type_shortcut tokenizer = Tokenizer._type_shortcut token_filter = TokenFilter._type_shortcut char_filter = CharFilter._type_shortcut normalizer = Normalizer._type_shortcut python-elasticsearch-9.1.1/elasticsearch/dsl/async_connections.py000066400000000000000000000026531506101734100253440ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from typing import Type from elasticsearch import AsyncElasticsearch from .connections import Connections class AsyncElasticsearchConnections(Connections[AsyncElasticsearch]): def __init__( self, *, elasticsearch_class: Type[AsyncElasticsearch] = AsyncElasticsearch ): super().__init__(elasticsearch_class=elasticsearch_class) connections = AsyncElasticsearchConnections(elasticsearch_class=AsyncElasticsearch) configure = connections.configure add_connection = connections.add_connection remove_connection = connections.remove_connection create_connection = connections.create_connection get_connection = connections.get_connection python-elasticsearch-9.1.1/elasticsearch/dsl/connections.py000066400000000000000000000120701506101734100241410ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, Dict, Generic, Type, TypeVar, Union from elasticsearch import Elasticsearch, __versionstr__ from .serializer import serializer _T = TypeVar("_T") class Connections(Generic[_T]): """ Class responsible for holding connections to different clusters. Used as a singleton in this module. """ def __init__(self, *, elasticsearch_class: Type[_T]): self._kwargs: Dict[str, Any] = {} self._conns: Dict[str, _T] = {} self.elasticsearch_class: Type[_T] = elasticsearch_class def configure(self, **kwargs: Any) -> None: """ Configure multiple connections at once, useful for passing in config dictionaries obtained from other sources, like Django's settings or a configuration management tool. Example:: connections.configure( default={'hosts': 'localhost'}, dev={'hosts': ['esdev1.example.com:9200'], 'sniff_on_start': True}, ) Connections will only be constructed lazily when requested through ``get_connection``. """ for k in list(self._conns): # try and preserve existing client to keep the persistent connections alive if k in self._kwargs and kwargs.get(k, None) == self._kwargs[k]: continue del self._conns[k] self._kwargs = kwargs def add_connection(self, alias: str, conn: _T) -> None: """ Add a connection object, it will be passed through as-is. """ self._conns[alias] = self._with_user_agent(conn) def remove_connection(self, alias: str) -> None: """ Remove connection from the registry. Raises ``KeyError`` if connection wasn't found. """ errors = 0 for d in (self._conns, self._kwargs): try: del d[alias] except KeyError: errors += 1 if errors == 2: raise KeyError(f"There is no connection with alias {alias!r}.") def create_connection(self, alias: str = "default", **kwargs: Any) -> _T: """ Construct an instance of ``elasticsearch.Elasticsearch`` and register it under given alias. 
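Example (an illustrative sketch; keyword arguments are passed through to
the client class)::

    es = connections.create_connection("default", hosts=["http://localhost:9200"])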
""" kwargs.setdefault("serializer", serializer) conn = self._conns[alias] = self.elasticsearch_class(**kwargs) return self._with_user_agent(conn) def get_connection(self, alias: Union[str, _T] = "default") -> _T: """ Retrieve a connection, construct it if necessary (only configuration was passed to us). If a non-string alias has been passed through we assume it's already a client instance and will just return it as-is. Raises ``KeyError`` if no client (or its definition) is registered under the alias. """ # do not check isinstance(Elasticsearch) so that people can wrap their # clients if not isinstance(alias, str): return self._with_user_agent(alias) # connection already established try: return self._conns[alias] except KeyError: pass # if not, try to create it try: return self.create_connection(alias, **self._kwargs[alias]) except KeyError: # no connection and no kwargs to set one up raise KeyError(f"There is no connection with alias {alias!r}.") def _with_user_agent(self, conn: _T) -> _T: # try to inject our user agent if hasattr(conn, "_headers"): is_frozen = conn._headers.frozen if is_frozen: conn._headers = conn._headers.copy() conn._headers.update( {"user-agent": f"elasticsearch-dsl-py/{__versionstr__}"} ) if is_frozen: conn._headers.freeze() return conn class ElasticsearchConnections(Connections[Elasticsearch]): def __init__(self, *, elasticsearch_class: Type[Elasticsearch] = Elasticsearch): super().__init__(elasticsearch_class=elasticsearch_class) connections = ElasticsearchConnections() configure = connections.configure add_connection = connections.add_connection remove_connection = connections.remove_connection create_connection = connections.create_connection get_connection = connections.get_connection python-elasticsearch-9.1.1/elasticsearch/dsl/document.py000066400000000000000000000016751506101734100234460ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from ._async.document import AsyncDocument # noqa: F401 from ._sync.document import Document # noqa: F401 from .document_base import InnerDoc, MetaField # noqa: F401 python-elasticsearch-9.1.1/elasticsearch/dsl/document_base.py000066400000000000000000000572071506101734100244420ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from datetime import date, datetime from fnmatch import fnmatch from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generic, List, Optional, Tuple, TypeVar, Union, get_args, overload, ) try: from types import UnionType except ImportError: UnionType = None # type: ignore[assignment, misc] from typing_extensions import dataclass_transform from .exceptions import ValidationException from .field import Binary, Boolean, Date, Field, Float, Integer, Nested, Object, Text from .mapping import Mapping from .utils import DOC_META_FIELDS, ObjectBase if TYPE_CHECKING: from elastic_transport import ObjectApiResponse from ..esql.esql import ESQLBase from .index_base import IndexBase class MetaField: def __init__(self, *args: Any, **kwargs: Any): self.args, self.kwargs = args, kwargs class InstrumentedExpression: """Proxy object for a ES|QL expression.""" def __init__(self, expr: str): self._expr = expr def _render_value(self, value: Any) -> str: if isinstance(value, InstrumentedExpression): return str(value) return json.dumps(value) def __str__(self) -> str: return self._expr def __repr__(self) -> str: return f"InstrumentedExpression[{self._expr}]" def __pos__(self) -> "InstrumentedExpression": return self def __neg__(self) -> "InstrumentedExpression": return InstrumentedExpression(f"-({self._expr})") def __eq__(self, value: Any) -> "InstrumentedExpression": # type: ignore[override] return InstrumentedExpression(f"{self._expr} == {self._render_value(value)}") def __ne__(self, value: Any) -> "InstrumentedExpression": # type: ignore[override] return InstrumentedExpression(f"{self._expr} != {self._render_value(value)}") def __lt__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} < {self._render_value(value)}") def __gt__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} > {self._render_value(value)}") def __le__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} <= {self._render_value(value)}") def __ge__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} >= {self._render_value(value)}") def __add__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} + {self._render_value(value)}") def __radd__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._render_value(value)} + {self._expr}") def __sub__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} - {self._render_value(value)}") def __rsub__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._render_value(value)} - {self._expr}") def __mul__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} * {self._render_value(value)}") def __rmul__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._render_value(value)} * {self._expr}") def __truediv__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} / 
{self._render_value(value)}") def __rtruediv__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._render_value(value)} / {self._expr}") def __mod__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._expr} % {self._render_value(value)}") def __rmod__(self, value: Any) -> "InstrumentedExpression": return InstrumentedExpression(f"{self._render_value(value)} % {self._expr}") def is_null(self) -> "InstrumentedExpression": """Compare the expression against NULL.""" return InstrumentedExpression(f"{self._expr} IS NULL") def is_not_null(self) -> "InstrumentedExpression": """Compare the expression against NOT NULL.""" return InstrumentedExpression(f"{self._expr} IS NOT NULL") def in_(self, *values: Any) -> "InstrumentedExpression": """Test if the expression equals one of the given values.""" rendered_values = ", ".join([f"{value}" for value in values]) return InstrumentedExpression(f"{self._expr} IN ({rendered_values})") def like(self, *patterns: str) -> "InstrumentedExpression": """Filter the expression using a string pattern.""" if len(patterns) == 1: return InstrumentedExpression( f"{self._expr} LIKE {self._render_value(patterns[0])}" ) else: return InstrumentedExpression( f'{self._expr} LIKE ({", ".join([self._render_value(p) for p in patterns])})' ) def rlike(self, *patterns: str) -> "InstrumentedExpression": """Filter the expression using a regular expression.""" if len(patterns) == 1: return InstrumentedExpression( f"{self._expr} RLIKE {self._render_value(patterns[0])}" ) else: return InstrumentedExpression( f'{self._expr} RLIKE ({", ".join([self._render_value(p) for p in patterns])})' ) def match(self, query: str) -> "InstrumentedExpression": """Perform a match query on the field.""" return InstrumentedExpression(f"{self._expr}:{self._render_value(query)}") def asc(self) -> "InstrumentedExpression": """Return the field name representation for ascending sort order. For use in ES|QL queries only. """ return InstrumentedExpression(f"{self._expr} ASC") def desc(self) -> "InstrumentedExpression": """Return the field name representation for descending sort order. For use in ES|QL queries only. """ return InstrumentedExpression(f"{self._expr} DESC") def nulls_first(self) -> "InstrumentedExpression": """Return the field name representation for nulls first sort order. For use in ES|QL queries only. """ return InstrumentedExpression(f"{self._expr} NULLS FIRST") def nulls_last(self) -> "InstrumentedExpression": """Return the field name representation for nulls last sort order. For use in ES|QL queries only. """ return InstrumentedExpression(f"{self._expr} NULLS LAST") def where( self, *expressions: Union[str, "InstrumentedExpression"] ) -> "InstrumentedExpression": """Add a condition to be met for the row to be included. Use only in expressions given in the ``STATS`` command. """ if len(expressions) == 1: return InstrumentedExpression(f"{self._expr} WHERE {expressions[0]}") else: return InstrumentedExpression( f'{self._expr} WHERE {" AND ".join([f"({expr})" for expr in expressions])}' ) E = InstrumentedExpression class InstrumentedField(InstrumentedExpression): """Proxy object for a mapped document field. An object of this instance is returned when a field is accessed as a class attribute of a ``Document`` or ``InnerDoc`` subclass. 
These objects can be used in any situation in which a reference to a field is required, such as when specifying sort options in a search:: class MyDocument(Document): name: str s = MyDocument.search() s = s.sort(-MyDocument.name) # sort by name in descending order """ def __init__(self, name: str, field: Optional[Field]): super().__init__(name) self._field = field # note that the return value type here assumes classes will only be used to # access fields (I haven't found a way to make this type dynamic based on a # decision taken at runtime) def __getattr__(self, attr: str) -> "InstrumentedField": try: # first let's see if this is an attribute of this object return super().__getattribute__(attr) # type: ignore[no-any-return] except AttributeError: if self._field: try: # next we see if we have a sub-field with this name return InstrumentedField(f"{self._expr}.{attr}", self._field[attr]) except KeyError: # lastly we let the wrapped field resolve this attribute return getattr(self._field, attr) # type: ignore[no-any-return] else: raise def __pos__(self) -> str: # type: ignore[override] """Return the field name representation for ascending sort order""" return f"{self._expr}" def __neg__(self) -> str: # type: ignore[override] """Return the field name representation for descending sort order""" return f"-{self._expr}" def __str__(self) -> str: return self._expr def __repr__(self) -> str: return f"InstrumentedField[{self._expr}]" class DocumentMeta(type): _doc_type: "DocumentOptions" _index: "IndexBase" def __new__( cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any] ) -> "DocumentMeta": # DocumentMeta filters attrs in place attrs["_doc_type"] = DocumentOptions(name, bases, attrs) return super().__new__(cls, name, bases, attrs) def __getattr__(cls, attr: str) -> Any: if attr in cls._doc_type.mapping: return InstrumentedField(attr, cls._doc_type.mapping[attr]) return super().__getattribute__(attr) class DocumentOptions: type_annotation_map = { int: (Integer, {}), float: (Float, {}), bool: (Boolean, {}), str: (Text, {}), bytes: (Binary, {}), datetime: (Date, {}), date: (Date, {"format": "yyyy-MM-dd"}), } def __init__(self, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]): meta = attrs.pop("Meta", None) # create the mapping instance self.mapping: Mapping = getattr(meta, "mapping", Mapping()) # register the document's fields, which can be given in a few formats: # # class MyDocument(Document): # # required field using native typing # # (str, int, float, bool, datetime, date) # field1: str # # # optional field using native typing # field2: Optional[datetime] # # # array field using native typing # field3: list[int] # # # sub-object, same as Object(MyInnerDoc) # field4: MyInnerDoc # # # nested sub-objects, same as Nested(MyInnerDoc) # field5: list[MyInnerDoc] # # # use typing, but override with any stock or custom field # field6: bool = MyCustomField() # # # best mypy and pyright support and dataclass-like behavior # field7: M[date] # field8: M[str] = mapped_field(MyCustomText(), default="foo") # # # legacy format without Python typing # field9 = Text() # # # ignore attributes # field10: ClassVar[str] = "a regular class variable" annotations = attrs.get("__annotations__", {}) fields = {n for n in attrs if isinstance(attrs[n], Field)} fields.update(annotations.keys()) field_defaults = {} for name in fields: value: Any = None required = None multi = None if name in annotations: # the field has a type annotation, so next we try to figure out # what field type we can use type_ = annotations[name] skip = False required = True multi = False while hasattr(type_, "__origin__"): if type_.__origin__ == ClassVar: skip = True break elif type_.__origin__ == Mapped: # M[type] -> extract the wrapped type type_ = type_.__args__[0] elif type_.__origin__ == Union: if len(type_.__args__) == 2 and type_.__args__[1] is type(None): # Optional[type] -> mark instance as optional required = False type_ = type_.__args__[0] else: raise TypeError("Unsupported union") elif type_.__origin__ in [list, List]: # List[type] -> mark instance as multi multi = True required = False type_ = type_.__args__[0] else: break if skip or type_ == ClassVar: # skip ClassVar attributes continue if type(type_) is UnionType: # a union given with the pipe syntax args = get_args(type_) if len(args) == 2 and args[1] is type(None): required = False type_ = type_.__args__[0] else: raise TypeError("Unsupported union") field = None field_args: List[Any] = [] field_kwargs: Dict[str, Any] = {} if isinstance(type_, type) and issubclass(type_, InnerDoc): # object or nested field field = Nested if multi else Object field_args = [type_] elif type_ in self.type_annotation_map: # use best field type for the type hint provided field, field_kwargs = self.type_annotation_map[type_] # type: ignore[assignment] if field: field_kwargs = { "multi": multi, "required": required, **field_kwargs, } value = field(*field_args, **field_kwargs) if name in attrs: # this field has a right-side value, which can be a field # instance on its own or wrapped with mapped_field() attr_value = attrs[name] if isinstance(attr_value, dict): # the mapped_field() wrapper function was used so we need # to look for the field instance and also record any # dataclass-style defaults attr_value = attrs[name].get("_field") default_value = attrs[name].get("default") or attrs[name].get( "default_factory" ) if default_value: field_defaults[name] = default_value if attr_value: value = attr_value if required is not None: value._required = required if multi is not None: value._multi = multi if value is None: raise TypeError(f"Cannot map field {name}") self.mapping.field(name, value) if name in attrs: del attrs[name] # store dataclass-style defaults for ObjectBase.__init__ to assign attrs["_defaults"] = field_defaults # add all the mappings for meta fields for name in dir(meta): if isinstance(getattr(meta, name, None), MetaField): params = getattr(meta, name) self.mapping.meta(name, *params.args, **params.kwargs) # document inheritance - include the fields from parents' mappings for b in bases: if hasattr(b, "_doc_type") and hasattr(b._doc_type, "mapping"): self.mapping.update(b._doc_type.mapping, update_only=True) @property def name(self) -> str: return self.mapping.properties.name _FieldType = TypeVar("_FieldType") class Mapped(Generic[_FieldType]): """Class that represents the type of a mapped field. This class can be used as an optional wrapper on a field type to help type checkers assign the correct type when the field is used as a class attribute. Consider the following definitions:: class MyDocument(Document): first: str second: M[str] mydoc = MyDocument(first="1", second="2") Type checkers have no trouble inferring the type of both ``mydoc.first`` and ``mydoc.second`` as ``str``, but while ``MyDocument.first`` will be incorrectly typed as ``str``, ``MyDocument.second`` should be assigned the correct ``InstrumentedField`` type. """ __slots__: Dict[str, Any] = {} if TYPE_CHECKING: @overload def __get__(self, instance: None, owner: Any) -> InstrumentedField: ...
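# A hedged sketch of the dual behavior that these overloads declare for
# static type checkers (``Post`` is a hypothetical document class):
#
#     class Post(Document):
#         title: M[str]
#
#     reveal_type(Post.title)    # class access resolves to InstrumentedField
#     reveal_type(Post().title)  # instance access resolves to str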
@overload def __get__(self, instance: object, owner: Any) -> _FieldType: ... def __get__( self, instance: Optional[object], owner: Any ) -> Union[InstrumentedField, _FieldType]: ... def __set__(self, instance: Optional[object], value: _FieldType) -> None: ... def __delete__(self, instance: Any) -> None: ... M = Mapped def mapped_field( field: Optional[Field] = None, *, init: bool = True, default: Any = None, default_factory: Optional[Callable[[], Any]] = None, **kwargs: Any, ) -> Any: """Construct a field using dataclass behaviors. This function can be used on the right side of a document field definition as a wrapper for the field instance or as a way to provide dataclass-compatible options. :param field: The instance of ``Field`` to use for this field. If not provided, an instance that is appropriate for the type given to the field is used. :param init: a value of ``True`` adds this field to the constructor, and a value of ``False`` omits it. The default is ``True``. :param default: a default value to use for this field when one is not provided explicitly. :param default_factory: a callable that returns a default value for the field, when one isn't provided explicitly. Only one of ``default`` and ``default_factory`` can be used. """ return { "_field": field, "init": init, "default": default, "default_factory": default_factory, **kwargs, } @dataclass_transform(field_specifiers=(mapped_field,)) class InnerDoc(ObjectBase, metaclass=DocumentMeta): """ Common class for inner documents like Object or Nested """ @classmethod def from_es( cls, data: Union[Dict[str, Any], "ObjectApiResponse[Any]"], data_only: bool = False, ) -> "InnerDoc": if data_only: data = {"_source": data} return super().from_es(data) class DocumentBase(ObjectBase): """ Model-like class for persisting documents in elasticsearch. """ @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: if cls._index._name is None: return True return fnmatch(hit.get("_index", ""), cls._index._name) @classmethod def _default_index(cls, index: Optional[str] = None) -> str: return index or cls._index._name def _get_index( self, index: Optional[str] = None, required: bool = True ) -> Optional[str]: if index is None: index = getattr(self.meta, "index", None) if index is None: index = getattr(self._index, "_name", None) if index is None and required: raise ValidationException("No index") if index and "*" in index: raise ValidationException("You cannot write to a wildcard index.") return index def __repr__(self) -> str: return "{}({})".format( self.__class__.__name__, ", ".join( f"{key}={getattr(self.meta, key)!r}" for key in ("index", "id") if key in self.meta ), ) def to_dict(self, include_meta: bool = False, skip_empty: bool = True) -> Dict[str, Any]: # type: ignore[override] """ Serialize the instance into a dictionary so that it can be saved in elasticsearch. :arg include_meta: if set to ``True`` will include all the metadata (``_index``, ``_id`` etc). Otherwise just the document's data is serialized. This is useful when passing multiple instances into ``elasticsearch.helpers.bulk``. :arg skip_empty: if set to ``False`` will cause empty values (``None``, ``[]``, ``{}``) to be left on the document. Those values will be stripped out otherwise as they make no difference in elasticsearch.
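A minimal sketch of that bulk use case (``Post`` is a hypothetical ``Document`` subclass and ``client`` an ``Elasticsearch`` instance)::

    from elasticsearch.helpers import bulk

    docs = [Post(title="one"), Post(title="two")]
    bulk(client, (d.to_dict(include_meta=True) for d in docs))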
""" d = super().to_dict(skip_empty=skip_empty) if not include_meta: return d meta = {"_" + k: self.meta[k] for k in DOC_META_FIELDS if k in self.meta} # in case of to_dict include the index unlike save/update/delete index = self._get_index(required=False) if index is not None: meta["_index"] = index meta["_source"] = d return meta @classmethod def _get_field_names( cls, for_esql: bool = False, nested_class: Optional[type[InnerDoc]] = None ) -> List[str]: """Return the list of field names used by this document. If the document has nested objects, their fields are reported using dot notation. If the ``for_esql`` argument is set to ``True``, the list omits nested fields, which are currently unsupported in ES|QL. """ fields = [] class_ = nested_class or cls for field_name in class_._doc_type.mapping: field = class_._doc_type.mapping[field_name] if isinstance(field, Object): if for_esql and isinstance(field, Nested): # ES|QL does not recognize Nested fields at this time continue sub_fields = cls._get_field_names( for_esql=for_esql, nested_class=field._doc_class ) for sub_field in sub_fields: fields.append(f"{field_name}.{sub_field}") else: fields.append(field_name) return fields @classmethod def esql_from(cls) -> "ESQLBase": """Return a base ES|QL query for instances of this document class. The returned query is initialized with ``FROM`` and ``KEEP`` statements, and can be completed as desired. """ from ..esql import ESQL # here to avoid circular imports return ( ESQL.from_(cls) .metadata("_id") .keep("_id", *tuple(cls._get_field_names(for_esql=True))) ) python-elasticsearch-9.1.1/elasticsearch/dsl/exceptions.py000066400000000000000000000020231506101734100237750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. class ElasticsearchDslException(Exception): pass class UnknownDslObject(ElasticsearchDslException): pass class ValidationException(ValueError, ElasticsearchDslException): pass class IllegalOperation(ElasticsearchDslException): pass python-elasticsearch-9.1.1/elasticsearch/dsl/faceted_search.py000066400000000000000000000021061506101734100245360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. from ._async.faceted_search import AsyncFacetedSearch # noqa: F401 from ._sync.faceted_search import FacetedSearch # noqa: F401 from .faceted_search_base import ( # noqa: F401 DateHistogramFacet, Facet, FacetedResponse, HistogramFacet, NestedFacet, RangeFacet, TermsFacet, ) python-elasticsearch-9.1.1/elasticsearch/dsl/faceted_search_base.py000066400000000000000000000357721506101734100255470ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime, timedelta from typing import ( TYPE_CHECKING, Any, Dict, Generic, List, Optional, Sequence, Tuple, Type, Union, cast, ) from typing_extensions import Self from .aggs import A, Agg from .query import MatchAll, Nested, Query, Range, Terms from .response import Response from .utils import _R, AttrDict if TYPE_CHECKING: from .document_base import DocumentBase from .response.aggs import BucketData from .search_base import SearchBase FilterValueType = Union[str, int, float, bool] __all__ = [ "FacetedSearchBase", "HistogramFacet", "TermsFacet", "DateHistogramFacet", "RangeFacet", "NestedFacet", ] class Facet(Generic[_R]): """ A facet on faceted search. Wraps an aggregation and provides functionality to create a filter for selected values and return a list of facet values from the result of the aggregation. """ agg_type: str = "" def __init__( self, metric: Optional[Agg[_R]] = None, metric_sort: str = "desc", **kwargs: Any ): self.filter_values = () self._params = kwargs self._metric = metric if metric and metric_sort: self._params["order"] = {"metric": metric_sort} def get_aggregation(self) -> Agg[_R]: """ Return the aggregation object. """ agg: Agg[_R] = A(self.agg_type, **self._params) if self._metric: agg.metric("metric", self._metric) return agg def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: """ Construct a filter. """ if not filter_values: return None f = self.get_value_filter(filter_values[0]) for v in filter_values[1:]: f |= self.get_value_filter(v) return f def get_value_filter(self, filter_value: FilterValueType) -> Query: # type: ignore[empty-body] """ Construct a filter for an individual value. """ pass def is_filtered(self, key: str, filter_values: List[FilterValueType]) -> bool: """ Return whether a filter is active on the given key. """ return key in filter_values def get_value(self, bucket: "BucketData[_R]") -> Any: """ Return a value representing a bucket. Its key by default. """ return bucket["key"] def get_metric(self, bucket: "BucketData[_R]") -> int: """ Return a metric, by default doc_count for a bucket.
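For example (a hedged sketch; ``A`` is the aggregation shortcut imported at the top of this module, and ``price`` is a hypothetical field)::

    facet = TermsFacet(field="category", metric=A("max", field="price"))
    # buckets of this facet report the maximum price instead of doc_count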
""" if self._metric: return cast(int, bucket["metric"]["value"]) return cast(int, bucket["doc_count"]) def get_values( self, data: "BucketData[_R]", filter_values: List[FilterValueType] ) -> List[Tuple[Any, int, bool]]: """ Turn the raw bucket data into a list of tuples containing the key, number of documents and a flag indicating whether this value has been selected or not. """ out = [] for bucket in data.buckets: b = cast("BucketData[_R]", bucket) key = self.get_value(b) out.append((key, self.get_metric(b), self.is_filtered(key, filter_values))) return out class TermsFacet(Facet[_R]): agg_type = "terms" def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: """Create a terms filter instead of bool containing term filters.""" if filter_values: return Terms(self._params["field"], filter_values, _expand__to_dot=False) return None class RangeFacet(Facet[_R]): agg_type = "range" def _range_to_dict( self, range: Tuple[Any, Tuple[Optional[int], Optional[int]]] ) -> Dict[str, Any]: key, _range = range out: Dict[str, Any] = {"key": key} if _range[0] is not None: out["from"] = _range[0] if _range[1] is not None: out["to"] = _range[1] return out def __init__( self, ranges: Sequence[Tuple[Any, Tuple[Optional[int], Optional[int]]]], **kwargs: Any, ): super().__init__(**kwargs) self._params["ranges"] = list(map(self._range_to_dict, ranges)) self._params["keyed"] = False self._ranges = dict(ranges) def get_value_filter(self, filter_value: FilterValueType) -> Query: f, t = self._ranges[filter_value] limits: Dict[str, Any] = {} if f is not None: limits["gte"] = f if t is not None: limits["lt"] = t return Range(self._params["field"], limits, _expand__to_dot=False) class HistogramFacet(Facet[_R]): agg_type = "histogram" def get_value_filter(self, filter_value: FilterValueType) -> Range: return Range( self._params["field"], { "gte": filter_value, "lt": filter_value + self._params["interval"], }, _expand__to_dot=False, ) def _date_interval_year(d: datetime) -> datetime: return d.replace( year=d.year + 1, day=(28 if d.month == 2 and d.day == 29 else d.day) ) def _date_interval_month(d: datetime) -> datetime: return (d + timedelta(days=32)).replace(day=1) def _date_interval_week(d: datetime) -> datetime: return d + timedelta(days=7) def _date_interval_day(d: datetime) -> datetime: return d + timedelta(days=1) def _date_interval_hour(d: datetime) -> datetime: return d + timedelta(hours=1) class DateHistogramFacet(Facet[_R]): agg_type = "date_histogram" DATE_INTERVALS = { "year": _date_interval_year, "1Y": _date_interval_year, "month": _date_interval_month, "1M": _date_interval_month, "week": _date_interval_week, "1w": _date_interval_week, "day": _date_interval_day, "1d": _date_interval_day, "hour": _date_interval_hour, "1h": _date_interval_hour, } def __init__(self, **kwargs: Any): kwargs.setdefault("min_doc_count", 0) super().__init__(**kwargs) def get_value(self, bucket: "BucketData[_R]") -> Any: if not isinstance(bucket["key"], datetime): # Elasticsearch returns key=None instead of 0 for date 1970-01-01, # so we need to set key to 0 to avoid TypeError exception if bucket["key"] is None: bucket["key"] = 0 # Preserve milliseconds in the datetime return datetime.utcfromtimestamp(int(cast(int, bucket["key"])) / 1000.0) else: return bucket["key"] def get_value_filter(self, filter_value: Any) -> Range: for interval_type in ("calendar_interval", "fixed_interval"): if interval_type in self._params: break else: interval_type = "interval" return Range( self._params["field"], { "gte": 
filter_value, "lt": self.DATE_INTERVALS[self._params[interval_type]](filter_value), }, _expand__to_dot=False, ) class NestedFacet(Facet[_R]): agg_type = "nested" def __init__(self, path: str, nested_facet: Facet[_R]): self._path = path self._inner = nested_facet super().__init__(path=path, aggs={"inner": nested_facet.get_aggregation()}) def get_values( self, data: "BucketData[_R]", filter_values: List[FilterValueType] ) -> List[Tuple[Any, int, bool]]: return self._inner.get_values(data.inner, filter_values) def add_filter(self, filter_values: List[FilterValueType]) -> Optional[Query]: inner_q = self._inner.add_filter(filter_values) if inner_q: return Nested(path=self._path, query=inner_q) return None class FacetedResponse(Response[_R]): if TYPE_CHECKING: _faceted_search: "FacetedSearchBase[_R]" _facets: Dict[str, List[Tuple[Any, int, bool]]] @property def query_string(self) -> Optional[Union[str, Query]]: return self._faceted_search._query @property def facets(self) -> Dict[str, List[Tuple[Any, int, bool]]]: if not hasattr(self, "_facets"): super(AttrDict, self).__setattr__("_facets", AttrDict({})) for name, facet in self._faceted_search.facets.items(): self._facets[name] = facet.get_values( getattr(getattr(self.aggregations, "_filter_" + name), name), self._faceted_search.filter_values.get(name, []), ) return self._facets class FacetedSearchBase(Generic[_R]): """ Abstraction for creating faceted navigation searches that takes care of composing the queries, aggregations and filters as needed as well as presenting the results in an easy-to-consume fashion:: class BlogSearch(FacetedSearch): index = 'blogs' doc_types = [Blog, Post] fields = ['title^5', 'category', 'description', 'body'] facets = { 'type': TermsFacet(field='_type'), 'category': TermsFacet(field='category'), 'weekly_posts': DateHistogramFacet(field='published_from', interval='week') } def search(self): ' Override search to add your own filters ' s = super(BlogSearch, self).search() return s.filter('term', published=True) # when using: blog_search = BlogSearch("web framework", filters={"category": "python"}) # supports pagination blog_search[10:20] response = blog_search.execute() # easy access to aggregation results: for category, hit_count, is_selected in response.facets.category: print( "Category %s has %d hits%s." % ( category, hit_count, ' and is chosen' if is_selected else '' ) ) """ index: Optional[str] = None doc_types: Optional[List[Union[str, Type["DocumentBase"]]]] = None fields: Sequence[str] = [] facets: Dict[str, Facet[_R]] = {} using = "default" if TYPE_CHECKING: def search(self) -> "SearchBase[_R]": ... def __init__( self, query: Optional[Union[str, Query]] = None, filters: Dict[str, FilterValueType] = {}, sort: Sequence[str] = [], ): """ :arg query: the text to search for :arg filters: facet values to filter :arg sort: sort information to be passed to :class:`~elasticsearch.dsl.Search` """ self._query = query self._filters: Dict[str, Query] = {} self._sort = sort self.filter_values: Dict[str, List[FilterValueType]] = {} for name, value in filters.items(): self.add_filter(name, value) self._s = self.build_search() def __getitem__(self, k: Union[int, slice]) -> Self: self._s = self._s[k] return self def add_filter( self, name: str, filter_values: Union[FilterValueType, List[FilterValueType]] ) -> None: """ Add a filter for a facet. 
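This method is called from the constructor for each entry in the ``filters`` argument, before the underlying ``Search`` object is built. A sketch, reusing the hypothetical ``BlogSearch`` from the class docstring above::

    bs = BlogSearch("python", filters={"category": "python"})
    # internally equivalent to calling bs.add_filter("category", "python")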
""" # normalize the value into a list if not isinstance(filter_values, (tuple, list)): if filter_values is None: return filter_values = [ filter_values, ] # remember the filter values for use in FacetedResponse self.filter_values[name] = filter_values # get the filter from the facet f = self.facets[name].add_filter(filter_values) if f is None: return self._filters[name] = f def query( self, search: "SearchBase[_R]", query: Union[str, Query] ) -> "SearchBase[_R]": """ Add query part to ``search``. Override this if you wish to customize the query used. """ if query: if self.fields: return search.query("multi_match", fields=self.fields, query=query) else: return search.query("multi_match", query=query) return search def aggregate(self, search: "SearchBase[_R]") -> None: """ Add aggregations representing the facets selected, including potential filters. """ for f, facet in self.facets.items(): agg = facet.get_aggregation() agg_filter: Query = MatchAll() for field, filter in self._filters.items(): if f == field: continue agg_filter &= filter search.aggs.bucket("_filter_" + f, "filter", filter=agg_filter).bucket( f, agg ) def filter(self, search: "SearchBase[_R]") -> "SearchBase[_R]": """ Add a ``post_filter`` to the search request narrowing the results based on the facet filters. """ if not self._filters: return search post_filter: Query = MatchAll() for f in self._filters.values(): post_filter &= f return search.post_filter(post_filter) def highlight(self, search: "SearchBase[_R]") -> "SearchBase[_R]": """ Add highlighting for all the fields """ return search.highlight( *(f if "^" not in f else f.split("^", 1)[0] for f in self.fields) ) def sort(self, search: "SearchBase[_R]") -> "SearchBase[_R]": """ Add sorting information to the request. """ if self._sort: search = search.sort(*self._sort) return search def params(self, **kwargs: Any) -> None: """ Specify query params to be used when executing the search. All the keyword arguments will override the current values. See https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.search for all available parameters. """ self._s = self._s.params(**kwargs) def build_search(self) -> "SearchBase[_R]": """ Construct the ``Search`` object. """ s = self.search() if self._query is not None: s = self.query(s, self._query) s = self.filter(s) if self.fields: s = self.highlight(s) s = self.sort(s) self.aggregate(s) return s python-elasticsearch-9.1.1/elasticsearch/dsl/field.py000066400000000000000000005053171506101734100227150ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import base64 import collections.abc import ipaddress from copy import deepcopy from datetime import date, datetime from typing import ( TYPE_CHECKING, Any, Dict, Iterable, Iterator, Literal, Mapping, Optional, Sequence, Tuple, Type, Union, cast, ) from dateutil import parser, tz from elastic_transport.client_utils import DEFAULT, DefaultType from .exceptions import ValidationException from .query import Q from .utils import AttrDict, AttrList, DslBase from .wrappers import Range if TYPE_CHECKING: from datetime import tzinfo from ipaddress import IPv4Address, IPv6Address from _operator import _SupportsComparison from . import types from .document import InnerDoc from .document_base import InstrumentedField from .mapping_base import MappingBase from .query import Query unicode = str def construct_field( name_or_field: Union[ str, "Field", Dict[str, Any], ], **params: Any, ) -> "Field": # {"type": "text", "analyzer": "snowball"} if isinstance(name_or_field, collections.abc.Mapping): if params: raise ValueError( "construct_field() cannot accept parameters when passing in a dict." ) params = deepcopy(name_or_field) if "type" not in params: # inner object can be implicitly defined if "properties" in params: name = "object" else: raise ValueError('construct_field() needs to have a "type" key.') else: name = params.pop("type") return Field.get_dsl_class(name)(**params) # Text() if isinstance(name_or_field, Field): if params: raise ValueError( "construct_field() cannot accept parameters " "when passing in a construct_field object." ) return name_or_field # "text", analyzer="snowball" return Field.get_dsl_class(name_or_field)(**params) class Field(DslBase): _type_name = "field" _type_shortcut = staticmethod(construct_field) # all fields can be multifields _param_defs = {"fields": {"type": "field", "hash": True}} name = "" _coerce = False def __init__( self, multi: bool = False, required: bool = False, *args: Any, **kwargs: Any ): """ :arg bool multi: specifies whether field can contain array of values :arg bool required: specifies whether field is required """ self._multi = multi self._required = required super().__init__(*args, **kwargs) def __getitem__(self, subfield: str) -> "Field": return cast(Field, self._params.get("fields", {})[subfield]) def _serialize(self, data: Any, skip_empty: bool) -> Any: return data def _safe_serialize(self, data: Any, skip_empty: bool) -> Any: try: return self._serialize(data, skip_empty) except TypeError: # older method signature, without skip_empty return self._serialize(data) # type: ignore[call-arg] def _deserialize(self, data: Any) -> Any: return data def _empty(self) -> Optional[Any]: return None def empty(self) -> Optional[Any]: if self._multi: return AttrList([]) return self._empty() def serialize(self, data: Any, skip_empty: bool = True) -> Any: if isinstance(data, (list, AttrList, tuple)): return list( map( self._safe_serialize, cast(Iterable[Any], data), [skip_empty] * len(data), ) ) return self._safe_serialize(data, skip_empty) def deserialize(self, data: Any) -> Any: if isinstance(data, (list, AttrList, tuple)): data = [ None if d is None else self._deserialize(d) for d in cast(Iterable[Any], data) ] return data if data is None: return None return self._deserialize(data) def clean(self, data: Any) -> Any: if data is not None: data = self.deserialize(data) if data in (None, [], {}) and self._required: raise ValidationException("Value required for this field.") return data def to_dict(self) -> Dict[str, Any]: d = super().to_dict() name, value = 
cast(Tuple[str, Dict[str, Any]], d.popitem()) value["type"] = name return value class CustomField(Field): name = "custom" _coerce = True def to_dict(self) -> Dict[str, Any]: if isinstance(self.builtin_type, Field): return self.builtin_type.to_dict() d = super().to_dict() d["type"] = self.builtin_type return d class RangeField(Field): _coerce = True _core_field: Optional[Field] = None def _deserialize(self, data: Any) -> Range["_SupportsComparison"]: if isinstance(data, Range): return data data = {k: self._core_field.deserialize(v) for k, v in data.items()} # type: ignore[union-attr] return Range(data) def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]: if data is None: return None if not isinstance(data, collections.abc.Mapping): data = data.to_dict() return {k: self._core_field.serialize(v) for k, v in data.items()} # type: ignore[union-attr] class Float(Field): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "float" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if 
isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> float: return float(data) class Integer(Field): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "integer" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[int, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not 
DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> int: return int(data) class Object(Field): """ :arg doc_class: base doc class that handles mapping. If no `doc_class` is provided, new instance of `InnerDoc` will be created, populated with `properties` and used. Can not be provided together with `properties` :arg enabled: :arg subobjects: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "object" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, doc_class: Union[Type["InnerDoc"], "DefaultType"] = DEFAULT, *args: Any, enabled: Union[bool, "DefaultType"] = DEFAULT, subobjects: Union[ Literal["true", "false", "auto"], bool, "DefaultType" ] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if enabled is not DEFAULT: kwargs["enabled"] = enabled if subobjects is not DEFAULT: kwargs["subobjects"] = subobjects if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep if doc_class is not DEFAULT and ( properties is not DEFAULT or dynamic is not DEFAULT ): raise ValidationException( "doc_class and properties/dynamic should not be provided together" ) if doc_class is not DEFAULT: self._doc_class: Type["InnerDoc"] = doc_class else: # FIXME import from .document import InnerDoc # no InnerDoc subclass, creating one instead... 
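# (the dynamically created subclass below becomes self._doc_class; every
# entry of ``properties`` is then registered on its mapping, and a
# ``dynamic`` setting, when given, is stored as mapping meta)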
self._doc_class = type("InnerDoc", (InnerDoc,), {}) for name, field in ( properties if properties is not DEFAULT else {} ).items(): self._doc_class._doc_type.mapping.field(name, field) if "properties" in kwargs: del kwargs["properties"] if dynamic is not DEFAULT: self._doc_class._doc_type.mapping.meta("dynamic", dynamic) self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping) super().__init__(**kwargs) def __getitem__(self, name: str) -> Field: return self._mapping[name] def __contains__(self, name: str) -> bool: return name in self._mapping def _empty(self) -> "InnerDoc": return self._wrap({}) def _wrap(self, data: Dict[str, Any]) -> "InnerDoc": return self._doc_class.from_es(data, data_only=True) def empty(self) -> Union["InnerDoc", AttrList[Any]]: if self._multi: return AttrList[Any]([], self._wrap) return self._empty() def to_dict(self) -> Dict[str, Any]: d = self._mapping.to_dict() d.update(super().to_dict()) return d def _collect_fields(self) -> Iterator[Field]: return self._mapping.properties._collect_fields() def _deserialize(self, data: Any) -> "InnerDoc": # don't wrap already wrapped data if isinstance(data, self._doc_class): return data if isinstance(data, AttrDict): data = data._d_ return self._wrap(data) def _serialize( self, data: Optional[Union[Dict[str, Any], "InnerDoc"]], skip_empty: bool ) -> Optional[Dict[str, Any]]: if data is None: return None # somebody assigned raw dict to the field, we should tolerate that if isinstance(data, collections.abc.Mapping): return data return data.to_dict(skip_empty=skip_empty) def clean(self, data: Any) -> Any: data = super().clean(data) if data is None: return None if isinstance(data, (list, AttrList)): for d in cast(Iterator["InnerDoc"], data): d.full_clean() else: data.full_clean() return data def update(self, other: Any, update_only: bool = False) -> None: if not isinstance(other, Object): # not an inner/nested object, no merge possible return self._mapping.update(other._mapping, update_only) class AggregateMetricDouble(Field): """ :arg default_metric: (required) :arg metrics: (required) :arg ignore_malformed: :arg time_series_metric: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "aggregate_metric_double" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, default_metric: Union[str, "DefaultType"] = DEFAULT, metrics: Union[Sequence[str], "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if default_metric is not DEFAULT: kwargs["default_metric"] = default_metric if metrics is not DEFAULT: kwargs["metrics"] = metrics if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Alias(Field): """ :arg path: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "alias" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if path is not DEFAULT: kwargs["path"] = str(path) if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Binary(Field): """ :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "binary" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) def clean(self, data: str) -> str: # Binary fields are opaque, so there's not much cleaning # that can be done. return data def _deserialize(self, data: Any) -> bytes: return base64.b64decode(data) def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]: if data is None: return None return base64.b64encode(data).decode() class Boolean(Field): """ :arg boost: :arg fielddata: :arg index: :arg null_value: :arg ignore_malformed: :arg script: :arg on_script_error: :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "boolean" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, fielddata: Union[ "types.NumericFielddata", Dict[str, Any], "DefaultType" ] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if fielddata is not DEFAULT: kwargs["fielddata"] = fielddata if index is not DEFAULT: kwargs["index"] = index if null_value is not DEFAULT: kwargs["null_value"] = null_value if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if script is not DEFAULT: kwargs["script"] = script if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> bool: if data == "false": return False return bool(data) def clean(self, data: Any) -> Optional[bool]: if data is not None: data = self.deserialize(data) if data is None and self._required: raise ValidationException("Value required for this field.") return data # type: ignore[no-any-return] class Byte(Integer): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "byte" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Completion(Field): """ :arg analyzer: :arg contexts: :arg max_input_length: :arg preserve_position_increments: :arg preserve_separators: :arg search_analyzer: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "completion" _param_defs = { "analyzer": {"type": "analyzer"}, "search_analyzer": {"type": "analyzer"}, "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, contexts: Union[ Sequence["types.SuggestContext"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, max_input_length: Union[int, "DefaultType"] = DEFAULT, preserve_position_increments: Union[bool, "DefaultType"] = DEFAULT, preserve_separators: Union[bool, "DefaultType"] = DEFAULT, search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if contexts is not DEFAULT: kwargs["contexts"] = contexts if max_input_length is not DEFAULT: kwargs["max_input_length"] = max_input_length if preserve_position_increments is not DEFAULT: kwargs["preserve_position_increments"] = preserve_position_increments if preserve_separators is not DEFAULT: kwargs["preserve_separators"] = preserve_separators if search_analyzer is not DEFAULT: kwargs["search_analyzer"] = search_analyzer if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class ConstantKeyword(Field): """ :arg value: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "constant_keyword" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, value: Any = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if value is not DEFAULT: kwargs["value"] = value if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class CountedKeyword(Field): """ :arg index: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "counted_keyword" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, index: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if index is not DEFAULT: kwargs["index"] = index if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Date(Field): """ :arg default_timezone: timezone that will be automatically used for tz-naive values May be instance of `datetime.tzinfo` or string containing TZ offset :arg boost: :arg fielddata: :arg format: :arg ignore_malformed: :arg index: :arg script: :arg on_script_error: :arg null_value: :arg precision_step: :arg locale: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
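A sketch of the ``default_timezone`` argument, which is attached to tz-naive values during deserialization (names are hypothetical)::

    from elasticsearch.dsl import Date, Document

    class LogEntry(Document):
        # Naive datetimes coming back from Elasticsearch are assumed UTC.
        timestamp = Date(default_timezone="UTC")

Unlike the spec-derived keyword arguments shared with the other field types, ``default_timezone`` is the first positional argument. As the ``_deserialize`` implementation below shows, strings are parsed with ``dateutil`` and integers are interpreted as epoch milliseconds.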
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "date" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, default_timezone: Union[str, "tzinfo", "DefaultType"] = DEFAULT, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, fielddata: Union[ "types.NumericFielddata", Dict[str, Any], "DefaultType" ] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, null_value: Any = DEFAULT, precision_step: Union[int, "DefaultType"] = DEFAULT, locale: Union[str, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if fielddata is not DEFAULT: kwargs["fielddata"] = fielddata if format is not DEFAULT: kwargs["format"] = format if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if script is not DEFAULT: kwargs["script"] = script if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if null_value is not DEFAULT: kwargs["null_value"] = null_value if precision_step is not DEFAULT: kwargs["precision_step"] = precision_step if locale is not DEFAULT: kwargs["locale"] = locale if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep if default_timezone is DEFAULT: self._default_timezone = None elif isinstance(default_timezone, str): self._default_timezone = tz.gettz(default_timezone) else: self._default_timezone = default_timezone super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Union[datetime, date]: if isinstance(data, str): try: data = parser.parse(data) except Exception as e: raise ValidationException( f"Could not parse date from the value ({data!r})", e ) # we treat the yyyy-MM-dd format as a special case if hasattr(self, "format") and self.format == "yyyy-MM-dd": data = data.date() if isinstance(data, datetime): if self._default_timezone and data.tzinfo is None: data = data.replace(tzinfo=self._default_timezone) return cast(datetime, data) if 
isinstance(data, date): return data if isinstance(data, int): # Divide by a float to preserve milliseconds on the datetime. return datetime.utcfromtimestamp(data / 1000.0) raise ValidationException(f"Could not parse date from the value ({data!r})") class DateNanos(Field): """ :arg boost: :arg format: :arg ignore_malformed: :arg index: :arg script: :arg on_script_error: :arg null_value: :arg precision_step: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "date_nanos" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, format: Union[str, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, null_value: Any = DEFAULT, precision_step: Union[int, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if format is not DEFAULT: kwargs["format"] = format if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if script is not DEFAULT: kwargs["script"] = script if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if null_value is not DEFAULT: kwargs["null_value"] = null_value if precision_step is not DEFAULT: kwargs["precision_step"] = precision_step if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class DateRange(RangeField): """ :arg format: :arg boost: :arg coerce: :arg index: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
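Range fields deserialize to ``Range`` wrapper objects; a sketch, assuming the ``Range`` export from ``elasticsearch.dsl`` (names are hypothetical)::

    from datetime import datetime
    from elasticsearch.dsl import DateRange, Document, Range

    class Outage(Document):
        window = DateRange()

    outage = Outage(
        window=Range(gte=datetime(2025, 1, 1), lt=datetime(2025, 1, 2))
    )
    # Range supports membership tests, e.g.:
    # datetime(2025, 1, 1, 12) in outage.window  -> True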
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "date_range" _core_field = Date() _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, format: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if format is not DEFAULT: kwargs["format"] = format if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if index is not DEFAULT: kwargs["index"] = index if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class DenseVector(Field): """ :arg dims: Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of the first vector added to the field. :arg element_type: The data type used to encode vectors. The supported data types are `float` (default), `byte`, and `bit`. Defaults to `float` if omitted. :arg index: If `true`, you can search this field using the kNN search API. Defaults to `True` if omitted. :arg index_options: An optional section that configures the kNN indexing algorithm. The HNSW algorithm has two internal parameters that influence how the data structure is built. These can be adjusted to improve the accuracy of results, at the expense of slower indexing speed. This parameter can only be specified when `index` is `true`. :arg similarity: The vector similarity metric to use in kNN search. Documents are ranked by their vector field's similarity to the query vector. The `_score` of each document will be derived from the similarity, in a way that ensures scores are positive and that a larger score corresponds to a higher ranking. Defaults to `l2_norm` when `element_type` is `bit` otherwise defaults to `cosine`. `bit` vectors only support `l2_norm` as their similarity metric. This parameter can only be specified when `index` is `true`. :arg meta: Metadata about the field. 
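A sketch of a typical kNN-oriented mapping (the dimension count and names are hypothetical)::

    from elasticsearch.dsl import DenseVector, Document

    class Passage(Document):
        # 384 floats per vector, compared with cosine similarity. `index`
        # defaults to true, which enables the kNN search API on the field.
        embedding = DenseVector(dims=384, similarity="cosine")

As the ``_deserialize`` implementation below shows, ``float`` and ``byte`` vectors are treated as multi-valued, with elements coerced to ``float`` and ``int`` respectively.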
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "dense_vector" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, dims: Union[int, "DefaultType"] = DEFAULT, element_type: Union[Literal["bit", "byte", "float"], "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ "types.DenseVectorIndexOptions", Dict[str, Any], "DefaultType" ] = DEFAULT, similarity: Union[ Literal["cosine", "dot_product", "l2_norm", "max_inner_product"], "DefaultType", ] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if dims is not DEFAULT: kwargs["dims"] = dims if element_type is not DEFAULT: kwargs["element_type"] = element_type if index is not DEFAULT: kwargs["index"] = index if index_options is not DEFAULT: kwargs["index_options"] = index_options if similarity is not DEFAULT: kwargs["similarity"] = similarity if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep self._element_type = kwargs.get("element_type", "float") if self._element_type in ["float", "byte"]: kwargs["multi"] = True super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Any: if self._element_type == "float": return float(data) elif self._element_type == "byte": return int(data) return data class Double(Float): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series metric. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field.
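A sketch of the numeric arguments shared by ``Double`` and the other number fields (names are hypothetical)::

    from elasticsearch.dsl import Document, Double

    class Product(Document):
        # `coerce` allows strings such as "9.99" to be indexed as numbers;
        # `null_value` is substituted for explicit JSON nulls.
        price = Double(coerce=True, null_value=0.0)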
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "double" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class DoubleRange(RangeField): """ :arg boost: :arg coerce: :arg index: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "double_range" _core_field = Double() _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if index is not DEFAULT: kwargs["index"] = index if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Flattened(Field): """ :arg boost: :arg depth_limit: :arg doc_values: :arg eager_global_ordinals: :arg index: :arg index_options: :arg null_value: :arg similarity: :arg split_queries_on_whitespace: :arg time_series_dimensions: :arg meta: Metadata about the field. 
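A sketch of mapping an arbitrary key/value object as a single field: the whole object is indexed as keywords, and leaf keys remain addressable with dotted paths (names are hypothetical)::

    from elasticsearch.dsl import Document, Flattened, Search

    class Alert(Document):
        labels = Flattened(depth_limit=5)

    # Leaf keys can be queried with the usual double-underscore syntax,
    # e.g. targeting labels.env:
    s = Search().filter("term", labels__env="prod")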
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "flattened" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, depth_limit: Union[int, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ Literal["docs", "freqs", "positions", "offsets"], "DefaultType" ] = DEFAULT, null_value: Union[str, "DefaultType"] = DEFAULT, similarity: Union[str, "DefaultType"] = DEFAULT, split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT, time_series_dimensions: Union[Sequence[str], "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if depth_limit is not DEFAULT: kwargs["depth_limit"] = depth_limit if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if eager_global_ordinals is not DEFAULT: kwargs["eager_global_ordinals"] = eager_global_ordinals if index is not DEFAULT: kwargs["index"] = index if index_options is not DEFAULT: kwargs["index_options"] = index_options if null_value is not DEFAULT: kwargs["null_value"] = null_value if similarity is not DEFAULT: kwargs["similarity"] = similarity if split_queries_on_whitespace is not DEFAULT: kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace if time_series_dimensions is not DEFAULT: kwargs["time_series_dimensions"] = time_series_dimensions if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class FloatRange(RangeField): """ :arg boost: :arg coerce: :arg index: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "float_range" _core_field = Float() _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if index is not DEFAULT: kwargs["index"] = index if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class GeoPoint(Field): """ :arg ignore_malformed: :arg ignore_z_value: :arg null_value: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
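A sketch; values may be supplied in any of the representations Elasticsearch accepts for ``geo_point``, such as an object, a ``"lat,lon"`` string, a geohash, or a ``[lon, lat]`` array (names are hypothetical)::

    from elasticsearch.dsl import Document, GeoPoint

    class Shop(Document):
        location = GeoPoint()

    shop = Shop(location={"lat": 41.12, "lon": -71.34})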
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "geo_point" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[ "types.LatLonGeoLocation", "types.GeoHashLocation", Sequence[float], str, Dict[str, Any], "DefaultType", ] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "position"], "DefaultType" ] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if ignore_z_value is not DEFAULT: kwargs["ignore_z_value"] = ignore_z_value if null_value is not DEFAULT: kwargs["null_value"] = null_value if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class GeoShape(Field): """ The `geo_shape` data type facilitates the indexing of and searching with arbitrary geo shapes such as rectangles and polygons. :arg coerce: :arg ignore_malformed: :arg ignore_z_value: :arg index: :arg orientation: :arg strategy: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
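A sketch of indexing a GeoJSON-style shape (names and coordinates are hypothetical)::

    from elasticsearch.dsl import Document, GeoShape

    class Region(Document):
        # `orientation` controls how polygon ring winding is interpreted.
        area = GeoShape(orientation="right")

    region = Region(
        area={
            "type": "polygon",
            "coordinates": [[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]]],
        }
    )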
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "geo_shape" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT, strategy: Union[Literal["recursive", "term"], "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if ignore_z_value is not DEFAULT: kwargs["ignore_z_value"] = ignore_z_value if index is not DEFAULT: kwargs["index"] = index if orientation is not DEFAULT: kwargs["orientation"] = orientation if strategy is not DEFAULT: kwargs["strategy"] = strategy if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class HalfFloat(Float): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series metric. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field.
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "half_float" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Histogram(Field): """ :arg ignore_malformed: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "histogram" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class IcuCollationKeyword(Field): """ :arg norms: :arg index_options: :arg index: Should the field be searchable? :arg null_value: Accepts a string value which is substituted for any explicit null values. Defaults to null, which means the field is treated as missing. :arg rules: :arg language: :arg country: :arg variant: :arg strength: :arg decomposition: :arg alternate: :arg case_level: :arg case_first: :arg numeric: :arg variable_top: :arg hiragana_quaternary_mode: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "icu_collation_keyword" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, norms: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ Literal["docs", "freqs", "positions", "offsets"], "DefaultType" ] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[str, "DefaultType"] = DEFAULT, rules: Union[str, "DefaultType"] = DEFAULT, language: Union[str, "DefaultType"] = DEFAULT, country: Union[str, "DefaultType"] = DEFAULT, variant: Union[str, "DefaultType"] = DEFAULT, strength: Union[ Literal["primary", "secondary", "tertiary", "quaternary", "identical"], "DefaultType", ] = DEFAULT, decomposition: Union[Literal["no", "identical"], "DefaultType"] = DEFAULT, alternate: Union[Literal["shifted", "non-ignorable"], "DefaultType"] = DEFAULT, case_level: Union[bool, "DefaultType"] = DEFAULT, case_first: Union[Literal["lower", "upper"], "DefaultType"] = DEFAULT, numeric: Union[bool, "DefaultType"] = DEFAULT, variable_top: Union[str, "DefaultType"] = DEFAULT, hiragana_quaternary_mode: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if norms is not DEFAULT: kwargs["norms"] = norms if index_options is not DEFAULT: kwargs["index_options"] = index_options if index is not DEFAULT: kwargs["index"] = index if null_value is not DEFAULT: kwargs["null_value"] = null_value if rules is not DEFAULT: kwargs["rules"] = rules if language is not DEFAULT: kwargs["language"] = language if country is not DEFAULT: kwargs["country"] = country if variant is not DEFAULT: kwargs["variant"] = variant if strength is not DEFAULT: kwargs["strength"] = strength if decomposition is not DEFAULT: kwargs["decomposition"] = decomposition if alternate is not DEFAULT: kwargs["alternate"] = alternate if case_level is not DEFAULT: kwargs["case_level"] = case_level if case_first is not DEFAULT: kwargs["case_first"] = case_first if numeric is not DEFAULT: kwargs["numeric"] = numeric if variable_top is not DEFAULT: kwargs["variable_top"] = variable_top if hiragana_quaternary_mode is not DEFAULT: kwargs["hiragana_quaternary_mode"] = hiragana_quaternary_mode if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep 
super().__init__(*args, **kwargs) class IntegerRange(RangeField): """ :arg boost: :arg coerce: :arg index: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "integer_range" _core_field = Integer() _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if index is not DEFAULT: kwargs["index"] = index if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Ip(Field): """ :arg boost: :arg index: :arg ignore_malformed: :arg null_value: :arg on_script_error: :arg script: :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
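A sketch; as the ``_deserialize`` implementation below shows, values are returned as ``ipaddress.IPv4Address``/``IPv6Address`` objects (the ``unicode`` name used there is the module's Python 3 alias for ``str``) and are serialized back to plain strings (names are hypothetical)::

    from elasticsearch.dsl import Document, Ip

    class Connection(Document):
        source = Ip()

    conn = Connection(source="198.51.100.7")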
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "ip" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[str, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if index is not DEFAULT: kwargs["index"] = index if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if null_value is not DEFAULT: kwargs["null_value"] = null_value if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]: # the ipaddress library for pypy only accepts unicode. return ipaddress.ip_address(unicode(data)) def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]: if data is None: return None return str(data) class IpRange(Field): """ :arg boost: :arg coerce: :arg index: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "ip_range" _core_field = Ip() _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if index is not DEFAULT: kwargs["index"] = index if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Join(Field): """ :arg relations: :arg eager_global_ordinals: :arg meta: Metadata about the field. 
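A sketch of a parent/child mapping driven by ``relations`` (names are hypothetical)::

    from elasticsearch.dsl import Document, Join

    class Post(Document):
        # One parent type ("question") with one child type ("answer");
        # a list value would declare several child types.
        post_type = Join(relations={"question": "answer"})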
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "join" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, relations: Union[ Mapping[str, Union[str, Sequence[str]]], "DefaultType" ] = DEFAULT, eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if relations is not DEFAULT: kwargs["relations"] = relations if eager_global_ordinals is not DEFAULT: kwargs["eager_global_ordinals"] = eager_global_ordinals if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Keyword(Field): """ :arg boost: :arg eager_global_ordinals: :arg index: :arg index_options: :arg script: :arg on_script_error: :arg normalizer: :arg norms: :arg null_value: :arg similarity: :arg split_queries_on_whitespace: :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
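A sketch of the common "text plus raw keyword" multi-field pattern, built with the ``fields`` argument (names are hypothetical)::

    from elasticsearch.dsl import Document, Keyword, Text

    class Article(Document):
        # Full-text search on `title`, exact matches and aggregations on
        # `title.raw`; `ignore_above` skips indexing very long values.
        title = Text(fields={"raw": Keyword(ignore_above=256)})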
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "keyword" _param_defs = { "normalizer": {"type": "normalizer"}, "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ Literal["docs", "freqs", "positions", "offsets"], "DefaultType" ] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, normalizer: Union[str, DslBase, "DefaultType"] = DEFAULT, norms: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[str, "DefaultType"] = DEFAULT, similarity: Union[str, None, "DefaultType"] = DEFAULT, split_queries_on_whitespace: Union[bool, "DefaultType"] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if eager_global_ordinals is not DEFAULT: kwargs["eager_global_ordinals"] = eager_global_ordinals if index is not DEFAULT: kwargs["index"] = index if index_options is not DEFAULT: kwargs["index_options"] = index_options if script is not DEFAULT: kwargs["script"] = script if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if normalizer is not DEFAULT: kwargs["normalizer"] = normalizer if norms is not DEFAULT: kwargs["norms"] = norms if null_value is not DEFAULT: kwargs["null_value"] = null_value if similarity is not DEFAULT: kwargs["similarity"] = similarity if split_queries_on_whitespace is not DEFAULT: kwargs["split_queries_on_whitespace"] = split_queries_on_whitespace if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Long(Integer): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series metric. :arg time_series_dimension: For internal use by Elastic only.
Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "long" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[int, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class LongRange(RangeField): """ :arg boost: :arg coerce: :arg index: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "long_range" _core_field = Long() _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if index is not DEFAULT: kwargs["index"] = index if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class MatchOnlyText(Field): """ A variant of text that trades scoring and efficiency of positional queries for space efficiency. This field effectively stores data the same way as a text field that only indexes documents (index_options: docs) and disables norms (norms: false). Term queries perform at least as fast as they do on text fields; however, queries that need positions, such as the match_phrase query, are slower because they must consult the _source document to verify whether a phrase matches. All queries return constant scores that are equal to 1.0. :arg fields: :arg meta: Metadata about the field. :arg copy_to: Allows you to copy the values of multiple fields into a group field, which can then be queried as a single field. """ name = "match_only_text" _param_defs = { "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, **kwargs: Any, ): if fields is not DEFAULT: kwargs["fields"] = fields if meta is not DEFAULT: kwargs["meta"] = meta if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) super().__init__(*args, **kwargs) class Murmur3(Field): """ :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field.
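A usage sketch for the ``match_only_text`` field defined above, a common choice for log messages where space efficiency matters more than scoring or phrase-query speed (names are hypothetical)::

    from elasticsearch.dsl import Document, MatchOnlyText

    class LogLine(Document):
        message = MatchOnlyText()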
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "murmur3" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Nested(Object): """ :arg enabled: :arg include_in_parent: :arg include_in_root: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "nested" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, enabled: Union[bool, "DefaultType"] = DEFAULT, include_in_parent: Union[bool, "DefaultType"] = DEFAULT, include_in_root: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if enabled is not DEFAULT: kwargs["enabled"] = enabled if include_in_parent is not DEFAULT: kwargs["include_in_parent"] = include_in_parent if include_in_root is not DEFAULT: kwargs["include_in_root"] = include_in_root if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep 
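# Descriptive comment (editorial, not in the original source): a nested
# field always holds an array of inner documents, so ``multi`` is
# defaulted to True below unless the caller explicitly overrides it.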
kwargs.setdefault("multi", True) super().__init__(*args, **kwargs) class Passthrough(Field): """ :arg enabled: :arg priority: :arg time_series_dimension: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "passthrough" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, enabled: Union[bool, "DefaultType"] = DEFAULT, priority: Union[int, "DefaultType"] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if enabled is not DEFAULT: kwargs["enabled"] = enabled if priority is not DEFAULT: kwargs["priority"] = priority if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Percolator(Field): """ :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "percolator" _coerce = True _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> "Query": return Q(data) # type: ignore[no-any-return] def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]: if data is None: return None return data.to_dict() # type: ignore[no-any-return] class Point(Field): """ :arg ignore_malformed: :arg ignore_z_value: :arg null_value: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "point" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[str, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if ignore_z_value is not DEFAULT: kwargs["ignore_z_value"] = ignore_z_value if null_value is not DEFAULT: kwargs["null_value"] = null_value if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class RankFeature(Float): """ :arg 
positive_score_impact: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "rank_feature" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, positive_score_impact: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if positive_score_impact is not DEFAULT: kwargs["positive_score_impact"] = positive_score_impact if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class RankFeatures(Field): """ :arg positive_score_impact: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "rank_features" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, positive_score_impact: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if positive_score_impact is not DEFAULT: kwargs["positive_score_impact"] = positive_score_impact if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class RankVectors(Field): """ Technical preview :arg element_type: :arg dims: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "rank_vectors" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, element_type: Union[Literal["byte", "float", "bit"], "DefaultType"] = DEFAULT, dims: Union[int, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if element_type is not DEFAULT: kwargs["element_type"] = element_type if dims is not DEFAULT: kwargs["dims"] = dims if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class ScaledFloat(Float): """ :arg null_value: :arg scaling_factor: Required. Values are multiplied by this factor at index time and stored as a long; the constructor below raises if it is omitted. :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series metric. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field.
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "scaled_float" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[float, "DefaultType"] = DEFAULT, scaling_factor: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if scaling_factor is not DEFAULT: kwargs["scaling_factor"] = scaling_factor if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep if "scaling_factor" not in kwargs: if len(args) > 0: kwargs["scaling_factor"] = args[0] args = args[1:] else: raise TypeError("missing required argument: 'scaling_factor'") super().__init__(*args, **kwargs) class SearchAsYouType(Field): """ :arg analyzer: :arg index: :arg index_options: :arg max_shingle_size: :arg norms: :arg search_analyzer: :arg search_quote_analyzer: :arg similarity: :arg term_vector: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "search_as_you_type" _param_defs = { "analyzer": {"type": "analyzer"}, "search_analyzer": {"type": "analyzer"}, "search_quote_analyzer": {"type": "analyzer"}, "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ Literal["docs", "freqs", "positions", "offsets"], "DefaultType" ] = DEFAULT, max_shingle_size: Union[int, "DefaultType"] = DEFAULT, norms: Union[bool, "DefaultType"] = DEFAULT, search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, search_quote_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, similarity: Union[str, None, "DefaultType"] = DEFAULT, term_vector: Union[ Literal[ "no", "yes", "with_offsets", "with_positions", "with_positions_offsets", "with_positions_offsets_payloads", "with_positions_payloads", ], "DefaultType", ] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if index is not DEFAULT: kwargs["index"] = index if index_options is not DEFAULT: kwargs["index_options"] = index_options if max_shingle_size is not DEFAULT: kwargs["max_shingle_size"] = max_shingle_size if norms is not DEFAULT: kwargs["norms"] = norms if search_analyzer is not DEFAULT: kwargs["search_analyzer"] = search_analyzer if search_quote_analyzer is not DEFAULT: kwargs["search_quote_analyzer"] = search_quote_analyzer if similarity is not DEFAULT: kwargs["similarity"] = similarity if term_vector is not DEFAULT: kwargs["term_vector"] = term_vector if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class SemanticText(Field): """ :arg meta: :arg inference_id: Inference endpoint that will be used to generate embeddings for the field. This parameter cannot be updated. Use the Create inference API to create the endpoint. If `search_inference_id` is specified, the inference endpoint will only be used at index time. Defaults to `.elser-2-elasticsearch` if omitted. :arg search_inference_id: Inference endpoint that will be used to generate embeddings at query time. You can update this parameter by using the Update mapping API. Use the Create inference API to create the endpoint. 
If not specified, the inference endpoint defined by inference_id will be used at both index and query time. :arg chunking_settings: Settings for chunking text into smaller passages. If specified, these will override the chunking settings sent in the inference endpoint associated with inference_id. If chunking settings are updated, they will not be applied to existing documents until they are reindexed. """ name = "semantic_text" def __init__( self, *args: Any, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, inference_id: Union[str, "DefaultType"] = DEFAULT, search_inference_id: Union[str, "DefaultType"] = DEFAULT, chunking_settings: Union[ "types.ChunkingSettings", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if meta is not DEFAULT: kwargs["meta"] = meta if inference_id is not DEFAULT: kwargs["inference_id"] = inference_id if search_inference_id is not DEFAULT: kwargs["search_inference_id"] = search_inference_id if chunking_settings is not DEFAULT: kwargs["chunking_settings"] = chunking_settings super().__init__(*args, **kwargs) class Shape(Field): """ The `shape` data type facilitates the indexing of and searching with arbitrary `x, y` cartesian shapes such as rectangles and polygons. :arg coerce: :arg ignore_malformed: :arg ignore_z_value: :arg orientation: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "shape" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, ignore_z_value: Union[bool, "DefaultType"] = DEFAULT, orientation: Union[Literal["right", "left"], "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if ignore_z_value is not DEFAULT: kwargs["ignore_z_value"] = ignore_z_value if orientation is not DEFAULT: kwargs["orientation"] = orientation if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Short(Integer): """ :arg null_value: :arg boost: :arg coerce: :arg 
ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series metric. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "short" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class SparseVector(Field): """ :arg store: :arg index_options: Additional index options for the sparse vector field that control its token pruning behavior. :arg meta: Metadata about the field.
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "sparse_vector" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, store: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ "types.SparseVectorIndexOptions", Dict[str, Any], "DefaultType" ] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if store is not DEFAULT: kwargs["store"] = store if index_options is not DEFAULT: kwargs["index_options"] = index_options if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Text(Field): """ :arg analyzer: :arg boost: :arg eager_global_ordinals: :arg fielddata: :arg fielddata_frequency_filter: :arg index: :arg index_options: :arg index_phrases: :arg index_prefixes: :arg norms: :arg position_increment_gap: :arg search_analyzer: :arg search_quote_analyzer: :arg similarity: :arg term_vector: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "text" _param_defs = { "analyzer": {"type": "analyzer"}, "search_analyzer": {"type": "analyzer"}, "search_quote_analyzer": {"type": "analyzer"}, "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, eager_global_ordinals: Union[bool, "DefaultType"] = DEFAULT, fielddata: Union[bool, "DefaultType"] = DEFAULT, fielddata_frequency_filter: Union[ "types.FielddataFrequencyFilter", Dict[str, Any], "DefaultType" ] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, index_options: Union[ Literal["docs", "freqs", "positions", "offsets"], "DefaultType" ] = DEFAULT, index_phrases: Union[bool, "DefaultType"] = DEFAULT, index_prefixes: Union[ "types.TextIndexPrefixes", None, Dict[str, Any], "DefaultType" ] = DEFAULT, norms: Union[bool, "DefaultType"] = DEFAULT, position_increment_gap: Union[int, "DefaultType"] = DEFAULT, search_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, search_quote_analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, similarity: Union[str, None, "DefaultType"] = DEFAULT, term_vector: Union[ Literal[ "no", "yes", "with_offsets", "with_positions", "with_positions_offsets", "with_positions_offsets_payloads", "with_positions_payloads", ], "DefaultType", ] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: 
Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if boost is not DEFAULT: kwargs["boost"] = boost if eager_global_ordinals is not DEFAULT: kwargs["eager_global_ordinals"] = eager_global_ordinals if fielddata is not DEFAULT: kwargs["fielddata"] = fielddata if fielddata_frequency_filter is not DEFAULT: kwargs["fielddata_frequency_filter"] = fielddata_frequency_filter if index is not DEFAULT: kwargs["index"] = index if index_options is not DEFAULT: kwargs["index_options"] = index_options if index_phrases is not DEFAULT: kwargs["index_phrases"] = index_phrases if index_prefixes is not DEFAULT: kwargs["index_prefixes"] = index_prefixes if norms is not DEFAULT: kwargs["norms"] = norms if position_increment_gap is not DEFAULT: kwargs["position_increment_gap"] = position_increment_gap if search_analyzer is not DEFAULT: kwargs["search_analyzer"] = search_analyzer if search_quote_analyzer is not DEFAULT: kwargs["search_quote_analyzer"] = search_quote_analyzer if similarity is not DEFAULT: kwargs["similarity"] = similarity if term_vector is not DEFAULT: kwargs["term_vector"] = term_vector if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class TokenCount(Field): """ :arg analyzer: :arg boost: :arg index: :arg null_value: :arg enable_position_increments: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "token_count" _param_defs = { "analyzer": {"type": "analyzer"}, "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, analyzer: Union[str, DslBase, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, null_value: Union[float, "DefaultType"] = DEFAULT, enable_position_increments: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if boost is not DEFAULT: kwargs["boost"] = boost if index is not DEFAULT: kwargs["index"] = index if null_value is not DEFAULT: kwargs["null_value"] = null_value if enable_position_increments is not DEFAULT: kwargs["enable_position_increments"] = enable_position_increments if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class UnsignedLong(Field): """ :arg null_value: :arg boost: :arg coerce: :arg ignore_malformed: :arg index: :arg on_script_error: :arg script: :arg time_series_metric: For internal use by Elastic only. Marks the field as a time series metric. :arg time_series_dimension: For internal use by Elastic only. Marks the field as a time series dimension. Defaults to false. :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field.
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "unsigned_long" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[int, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, coerce: Union[bool, "DefaultType"] = DEFAULT, ignore_malformed: Union[bool, "DefaultType"] = DEFAULT, index: Union[bool, "DefaultType"] = DEFAULT, on_script_error: Union[Literal["fail", "continue"], "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, time_series_metric: Union[ Literal["gauge", "counter", "summary", "histogram", "position"], "DefaultType", ] = DEFAULT, time_series_dimension: Union[bool, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if boost is not DEFAULT: kwargs["boost"] = boost if coerce is not DEFAULT: kwargs["coerce"] = coerce if ignore_malformed is not DEFAULT: kwargs["ignore_malformed"] = ignore_malformed if index is not DEFAULT: kwargs["index"] = index if on_script_error is not DEFAULT: kwargs["on_script_error"] = on_script_error if script is not DEFAULT: kwargs["script"] = script if time_series_metric is not DEFAULT: kwargs["time_series_metric"] = time_series_metric if time_series_dimension is not DEFAULT: kwargs["time_series_dimension"] = time_series_dimension if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Version(Field): """ :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. 
:arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "version" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) class Wildcard(Field): """ :arg null_value: :arg doc_values: :arg copy_to: :arg store: :arg meta: Metadata about the field. :arg properties: :arg ignore_above: :arg dynamic: :arg fields: :arg synthetic_source_keep: """ name = "wildcard" _param_defs = { "properties": {"type": "field", "hash": True}, "fields": {"type": "field", "hash": True}, } def __init__( self, *args: Any, null_value: Union[str, "DefaultType"] = DEFAULT, doc_values: Union[bool, "DefaultType"] = DEFAULT, copy_to: Union[ Union[str, "InstrumentedField"], Sequence[Union[str, "InstrumentedField"]], "DefaultType", ] = DEFAULT, store: Union[bool, "DefaultType"] = DEFAULT, meta: Union[Mapping[str, str], "DefaultType"] = DEFAULT, properties: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, ignore_above: Union[int, "DefaultType"] = DEFAULT, dynamic: Union[ Literal["strict", "runtime", "true", "false"], bool, "DefaultType" ] = DEFAULT, fields: Union[Mapping[str, Field], "DefaultType"] = DEFAULT, synthetic_source_keep: Union[ Literal["none", "arrays", "all"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if null_value is not DEFAULT: kwargs["null_value"] = null_value if doc_values is not DEFAULT: kwargs["doc_values"] = doc_values if copy_to is not DEFAULT: if isinstance(copy_to, list): kwargs["copy_to"] = [str(field) for field in copy_to] else: kwargs["copy_to"] = str(copy_to) if store is not DEFAULT: kwargs["store"] = store if meta is not DEFAULT: kwargs["meta"] = meta if properties is not DEFAULT: kwargs["properties"] = properties if ignore_above is not DEFAULT: kwargs["ignore_above"] = ignore_above if dynamic is not DEFAULT: kwargs["dynamic"] = dynamic if fields is not DEFAULT: kwargs["fields"] = fields if synthetic_source_keep is not DEFAULT: kwargs["synthetic_source_keep"] = synthetic_source_keep super().__init__(*args, **kwargs) python-elasticsearch-9.1.1/elasticsearch/dsl/function.py000066400000000000000000000120071506101734100234440ustar00rootroot00000000000000# 
Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import collections.abc from copy import deepcopy from typing import ( Any, ClassVar, Dict, Literal, MutableMapping, Optional, Union, overload, ) from elastic_transport.client_utils import DEFAULT, DefaultType from .utils import AttrDict, DslBase @overload def SF(name_or_sf: MutableMapping[str, Any]) -> "ScoreFunction": ... @overload def SF(name_or_sf: "ScoreFunction") -> "ScoreFunction": ... @overload def SF(name_or_sf: str, **params: Any) -> "ScoreFunction": ... def SF( name_or_sf: Union[str, "ScoreFunction", MutableMapping[str, Any]], **params: Any, ) -> "ScoreFunction": # {"script_score": {"script": "_score"}, "filter": {}} if isinstance(name_or_sf, collections.abc.MutableMapping): if params: raise ValueError("SF() cannot accept parameters when passing in a dict.") kwargs: Dict[str, Any] = {} sf = deepcopy(name_or_sf) for k in ScoreFunction._param_defs: if k in name_or_sf: kwargs[k] = sf.pop(k) # sf is now empty: only filter and/or weight were given, which used to be the boost factor sf_params = params if not sf: name = "boost_factor" # {'FUNCTION': {...}} elif len(sf) == 1: name, sf_params = sf.popitem() else: raise ValueError(f"SF() got unexpected fields in the dictionary: {sf!r}") # boost factor special case, see elasticsearch #6343 if not isinstance(sf_params, collections.abc.Mapping): sf_params = {"value": sf_params} # mix known params (from _param_defs) and from inside the function kwargs.update(sf_params) return ScoreFunction.get_dsl_class(name)(**kwargs) # ScriptScore(script="_score", filter=Q()) if isinstance(name_or_sf, ScoreFunction): if params: raise ValueError( "SF() cannot accept parameters when passing in a ScoreFunction object."
) return name_or_sf # "script_score", script="_score", filter=Q() return ScoreFunction.get_dsl_class(name_or_sf)(**params) class ScoreFunction(DslBase): _type_name = "score_function" _type_shortcut = staticmethod(SF) _param_defs = { "query": {"type": "query"}, "filter": {"type": "query"}, "weight": {}, } name: ClassVar[Optional[str]] = None def to_dict(self) -> Dict[str, Any]: d = super().to_dict() # filter and query dicts should be at the same level as us for k in self._param_defs: if self.name is not None: val = d[self.name] if isinstance(val, dict) and k in val: d[k] = val.pop(k) return d class ScriptScore(ScoreFunction): name = "script_score" class BoostFactor(ScoreFunction): name = "boost_factor" def to_dict(self) -> Dict[str, Any]: d = super().to_dict() if self.name is not None: val = d[self.name] if isinstance(val, dict): if "value" in val: d[self.name] = val.pop("value") else: del d[self.name] return d class RandomScore(ScoreFunction): name = "random_score" class FieldValueFactorScore(ScoreFunction): name = "field_value_factor" class FieldValueFactor(FieldValueFactorScore): # alias of the above pass class Linear(ScoreFunction): name = "linear" class Gauss(ScoreFunction): name = "gauss" class Exp(ScoreFunction): name = "exp" class DecayFunction(AttrDict[Any]): def __init__( self, *, decay: Union[float, "DefaultType"] = DEFAULT, offset: Any = DEFAULT, scale: Any = DEFAULT, origin: Any = DEFAULT, multi_value_mode: Union[ Literal["min", "max", "avg", "sum"], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if decay != DEFAULT: kwargs["decay"] = decay if offset != DEFAULT: kwargs["offset"] = offset if scale != DEFAULT: kwargs["scale"] = scale if origin != DEFAULT: kwargs["origin"] = origin if multi_value_mode != DEFAULT: kwargs["multi_value_mode"] = multi_value_mode super().__init__(kwargs) python-elasticsearch-9.1.1/elasticsearch/dsl/index.py000066400000000000000000000017371506101734100227360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from ._async.index import ( # noqa: F401 AsyncComposableIndexTemplate, AsyncIndex, AsyncIndexTemplate, ) from ._sync.index import ComposableIndexTemplate, Index, IndexTemplate # noqa: F401 python-elasticsearch-9.1.1/elasticsearch/dsl/index_base.py000066400000000000000000000143231506101734100237230ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from typing_extensions import Self from . import analysis from .utils import AnyUsingType, merge if TYPE_CHECKING: from .document_base import DocumentMeta from .field import Field from .mapping_base import MappingBase class IndexBase: def __init__(self, name: str, mapping_class: type, using: AnyUsingType = "default"): """ :arg name: name of the index :arg using: connection alias to use, defaults to ``'default'`` """ self._name = name self._doc_types: List["DocumentMeta"] = [] self._using = using self._settings: Dict[str, Any] = {} self._aliases: Dict[str, Any] = {} self._analysis: Dict[str, Any] = {} self._mapping_class = mapping_class self._mapping: Optional["MappingBase"] = None def resolve_nested( self, field_path: str ) -> Tuple[List[str], Optional["MappingBase"]]: for doc in self._doc_types: nested, field = doc._doc_type.mapping.resolve_nested(field_path) if field is not None: return nested, field if self._mapping: return self._mapping.resolve_nested(field_path) return [], None def resolve_field(self, field_path: str) -> Optional["Field"]: for doc in self._doc_types: field = doc._doc_type.mapping.resolve_field(field_path) if field is not None: return field if self._mapping: return self._mapping.resolve_field(field_path) return None def get_or_create_mapping(self) -> "MappingBase": if self._mapping is None: self._mapping = self._mapping_class() return self._mapping def mapping(self, mapping: "MappingBase") -> None: """ Associate a mapping (an instance of :class:`~elasticsearch.dsl.Mapping`) with this index. This means that, when this index is created, it will contain the mappings for the document type defined by those mappings. """ self.get_or_create_mapping().update(mapping) def document(self, document: "DocumentMeta") -> "DocumentMeta": """ Associate a :class:`~elasticsearch.dsl.Document` subclass with an index. This means that, when this index is created, it will contain the mappings for the ``Document``. If the ``Document`` class doesn't have a default index yet (by defining ``class Index``), this instance will be used. Can be used as a decorator:: i = Index('blog') @i.document class Post(Document): title = Text() # create the index, including Post mappings i.create() # .search() will now return a Search object that will return # properly deserialized Post instances s = i.search() """ self._doc_types.append(document) # If the document index does not have any name, that means the user # did not set any index already to the document. # So set this index as document index if document._index._name is None: document._index = self return document def settings(self, **kwargs: Any) -> Self: """ Add settings to the index:: i = Index('i') i.settings(number_of_shards=1, number_of_replicas=0) Multiple calls to ``settings`` will merge the keys, later overriding the earlier. 
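For example, two successive calls merge into a single settings
        dict, equivalent to passing both keys in one call (illustrative
        sketch)::

            i = Index('i')
            i.settings(number_of_shards=1)
            i.settings(number_of_replicas=0)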
""" self._settings.update(kwargs) return self def aliases(self, **kwargs: Any) -> Self: """ Add aliases to the index definition:: i = Index('blog-v2') i.aliases(blog={}, published={'filter': Q('term', published=True)}) """ self._aliases.update(kwargs) return self def analyzer(self, *args: Any, **kwargs: Any) -> None: """ Explicitly add an analyzer to an index. Note that all custom analyzers defined in mappings will also be created. This is useful for search analyzers. Example:: from elasticsearch.dsl import analyzer, tokenizer my_analyzer = analyzer('my_analyzer', tokenizer=tokenizer('trigram', 'nGram', min_gram=3, max_gram=3), filter=['lowercase'] ) i = Index('blog') i.analyzer(my_analyzer) """ analyzer = analysis.analyzer(*args, **kwargs) d = analyzer.get_analysis_definition() # empty custom analyzer, probably already defined out of our control if not d: return # merge the definition merge(self._analysis, d, True) def to_dict(self) -> Dict[str, Any]: out = {} if self._settings: out["settings"] = self._settings if self._aliases: out["aliases"] = self._aliases mappings = self._mapping.to_dict() if self._mapping else {} analysis = self._mapping._collect_analysis() if self._mapping else {} for d in self._doc_types: mapping = d._doc_type.mapping merge(mappings, mapping.to_dict(), True) merge(analysis, mapping._collect_analysis(), True) if mappings: out["mappings"] = mappings if analysis or self._analysis: merge(analysis, self._analysis) out.setdefault("settings", {})["analysis"] = analysis return out python-elasticsearch-9.1.1/elasticsearch/dsl/mapping.py000066400000000000000000000015741506101734100232610ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from ._async.mapping import AsyncMapping # noqa: F401 from ._sync.mapping import Mapping # noqa: F401 python-elasticsearch-9.1.1/elasticsearch/dsl/mapping_base.py000066400000000000000000000164701506101734100242540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import collections.abc from itertools import chain from typing import Any, Dict, Iterator, List, Optional, Tuple, cast from typing_extensions import Self from .field import Field, Nested, Text, construct_field from .utils import DslBase META_FIELDS = frozenset( ( "dynamic", "transform", "dynamic_date_formats", "date_detection", "numeric_detection", "dynamic_templates", "enabled", ) ) class Properties(DslBase): name = "properties" _param_defs = {"properties": {"type": "field", "hash": True}} properties: Dict[str, Field] def __init__(self) -> None: super().__init__() def __repr__(self) -> str: return "Properties()" def __getitem__(self, name: str) -> Field: return self.properties[name] def __contains__(self, name: str) -> bool: return name in self.properties def to_dict(self) -> Dict[str, Any]: return cast(Dict[str, Field], super().to_dict()["properties"]) def field(self, name: str, *args: Any, **kwargs: Any) -> Self: self.properties[name] = construct_field(*args, **kwargs) return self def _collect_fields(self) -> Iterator[Field]: """Iterate over all Field objects within, including multi fields.""" fields = cast(Dict[str, Field], self.properties.to_dict()) # type: ignore[attr-defined] for f in fields.values(): yield f # multi fields if hasattr(f, "fields"): yield from f.fields.to_dict().values() # nested and inner objects if hasattr(f, "_collect_fields"): yield from f._collect_fields() def update(self, other_object: Any) -> None: if not hasattr(other_object, "properties"): # not an inner/nested object, no merge possible return our, other = self.properties, other_object.properties for name in other: if name in our: if hasattr(our[name], "update"): our[name].update(other[name]) continue our[name] = other[name] class MappingBase: def __init__(self) -> None: self.properties = Properties() self._meta: Dict[str, Any] = {} def __repr__(self) -> str: return "Mapping()" def _clone(self) -> Self: m = self.__class__() m.properties._params = self.properties._params.copy() return m def resolve_nested( self, field_path: str ) -> Tuple[List[str], Optional["MappingBase"]]: field = self nested = [] parts = field_path.split(".") for i, step in enumerate(parts): try: field = field[step] # type: ignore[assignment] except KeyError: return [], None if isinstance(field, Nested): nested.append(".".join(parts[: i + 1])) return nested, field def resolve_field(self, field_path: str) -> Optional[Field]: field = self for step in field_path.split("."): try: field = field[step] # type: ignore[assignment] except KeyError: return None return cast(Field, field) def _collect_analysis(self) -> Dict[str, Any]: analysis: Dict[str, Any] = {} fields = [] if "_all" in self._meta: fields.append(Text(**self._meta["_all"])) for f in chain(fields, self.properties._collect_fields()): for analyzer_name in ( "analyzer", "normalizer", "search_analyzer", "search_quote_analyzer", ): if not hasattr(f, analyzer_name): continue analyzer = getattr(f, analyzer_name) d = analyzer.get_analysis_definition() # empty custom analyzer, probably already defined out of our control if not d: continue # merge the definition # TODO: conflict detection/resolution for key in d: analysis.setdefault(key, {}).update(d[key]) return analysis def _update_from_dict(self, raw: Dict[str, Any]) -> None: for name, definition in raw.get("properties", {}).items(): self.field(name, definition) # metadata like _all etc for name, value in raw.items(): if name != "properties": if isinstance(value, collections.abc.Mapping): self.meta(name, **value) else: self.meta(name, value) 
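    # Illustrative sketch (editorial, not part of the original source):
    # given a raw mapping body such as
    #     {"properties": {"title": {"type": "text"}}, "dynamic": "strict"}
    # _update_from_dict() above registers "title" as a field built by
    # construct_field() and routes "dynamic" (a known meta field) to
    # self.meta().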
def update(self, mapping: "MappingBase", update_only: bool = False) -> None: for name in mapping: if update_only and name in self: # nested and inner objects, merge recursively if hasattr(self[name], "update"): # FIXME only merge subfields, not the settings self[name].update(mapping[name], update_only) continue self.field(name, mapping[name]) if update_only: for name in mapping._meta: if name not in self._meta: self._meta[name] = mapping._meta[name] else: self._meta.update(mapping._meta) def __contains__(self, name: str) -> bool: return name in self.properties.properties def __getitem__(self, name: str) -> Field: return self.properties.properties[name] def __iter__(self) -> Iterator[str]: return iter(self.properties.properties) def field(self, *args: Any, **kwargs: Any) -> Self: self.properties.field(*args, **kwargs) return self def meta(self, name: str, params: Any = None, **kwargs: Any) -> Self: if not name.startswith("_") and name not in META_FIELDS: name = "_" + name if params and kwargs: raise ValueError("Meta configs cannot have both value and a dictionary.") self._meta[name] = kwargs if params is None else params return self def to_dict(self) -> Dict[str, Any]: meta = self._meta # hard coded serialization of analyzers in _all if "_all" in meta: meta = meta.copy() _all = meta["_all"] = meta["_all"].copy() for f in ("analyzer", "search_analyzer", "search_quote_analyzer"): if hasattr(_all.get(f, None), "to_dict"): _all[f] = _all[f].to_dict() meta.update(self.properties.to_dict()) return meta python-elasticsearch-9.1.1/elasticsearch/dsl/query.py000066400000000000000000003150301506101734100227660ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import collections.abc from copy import deepcopy from itertools import chain from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Literal, Mapping, MutableMapping, Optional, Protocol, Sequence, TypeVar, Union, cast, overload, ) from elastic_transport.client_utils import DEFAULT # 'SF' looks unused but the test suite assumes it's available # from this module so others are liable to do so as well. from .function import SF # noqa: F401 from .function import ScoreFunction from .utils import DslBase if TYPE_CHECKING: from elastic_transport.client_utils import DefaultType from . import types, wrappers from .document_base import InstrumentedField _T = TypeVar("_T") _M = TypeVar("_M", bound=Mapping[str, Any]) class QProxiedProtocol(Protocol[_T]): _proxied: _T @overload def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ... @overload def Q(name_or_query: "Query") -> "Query": ... @overload def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ... @overload def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ... 
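# Call shapes accepted by ``Q()``, as declared by the overloads above (an
# illustrative sketch, not part of the module; ``Match`` and ``Bool`` are
# query classes defined later in this file):
#
#     Q("match", title="python")         # query name plus parameters
#     Q({"match": {"title": "python"}})  # dict with a single query clause
#     Q(Q("match", title="python"))      # Query instances pass through
#
# The resulting Query objects compose with ``&``, ``|`` and ``~``:
#
#     q = Q("match", title="python") & ~Q("match", title="ruby")
#     # -> Bool(must=[Match(title="python")], must_not=[Match(title="ruby")])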
def Q( name_or_query: Union[ str, "Query", QProxiedProtocol[_T], MutableMapping[str, _M], ] = "match_all", **params: Any, ) -> Union["Query", _T]: # {"match": {"title": "python"}} if isinstance(name_or_query, collections.abc.MutableMapping): if params: raise ValueError("Q() cannot accept parameters when passing in a dict.") if len(name_or_query) != 1: raise ValueError( 'Q() can only accept dict with a single query ({"match": {...}}). ' "Instead it got (%r)" % name_or_query ) name, q_params = deepcopy(name_or_query).popitem() return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params) # MatchAll() if isinstance(name_or_query, Query): if params: raise ValueError( "Q() cannot accept parameters when passing in a Query object." ) return name_or_query # s.query = Q('filtered', query=s.query) if hasattr(name_or_query, "_proxied"): return cast(QProxiedProtocol[_T], name_or_query)._proxied # "match", title="python" return Query.get_dsl_class(name_or_query)(**params) class Query(DslBase): _type_name = "query" _type_shortcut = staticmethod(Q) name: ClassVar[Optional[str]] = None # Add type annotations for methods not defined in every subclass __ror__: ClassVar[Callable[["Query", "Query"], "Query"]] __radd__: ClassVar[Callable[["Query", "Query"], "Query"]] __rand__: ClassVar[Callable[["Query", "Query"], "Query"]] def __add__(self, other: "Query") -> "Query": # make sure we give queries that know how to combine themselves # preference if hasattr(other, "__radd__"): return other.__radd__(self) return Bool(must=[self, other]) def __invert__(self) -> "Query": return Bool(must_not=[self]) def __or__(self, other: "Query") -> "Query": # make sure we give queries that know how to combine themselves # preference if hasattr(other, "__ror__"): return other.__ror__(self) return Bool(should=[self, other]) def __and__(self, other: "Query") -> "Query": # make sure we give queries that know how to combine themselves # preference if hasattr(other, "__rand__"): return other.__rand__(self) return Bool(must=[self, other]) class Bool(Query): """ matches documents matching boolean combinations of other queries. :arg filter: The clause (query) must appear in matching documents. However, unlike `must`, the score of the query will be ignored. :arg minimum_should_match: Specifies the number or percentage of `should` clauses returned documents must match. :arg must: The clause (query) must appear in matching documents and will contribute to the score. :arg must_not: The clause (query) must not appear in the matching documents. Because scoring is ignored, a score of `0` is returned for all documents. :arg should: The clause (query) should appear in the matching document. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "bool" _param_defs = { "filter": {"type": "query", "multi": True}, "must": {"type": "query", "multi": True}, "must_not": {"type": "query", "multi": True}, "should": {"type": "query", "multi": True}, } def __init__( self, *, filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, must: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, must_not: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, should: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( filter=filter, minimum_should_match=minimum_should_match, must=must, must_not=must_not, should=should, boost=boost, _name=_name, **kwargs, ) def __add__(self, other: Query) -> "Bool": q = self._clone() if isinstance(other, Bool): q.must += other.must q.should += other.should q.must_not += other.must_not q.filter += other.filter else: q.must.append(other) return q __radd__ = __add__ def __or__(self, other: Query) -> Query: for q in (self, other): if isinstance(q, Bool) and not any( (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None)) ): other = self if q is other else other q = q._clone() if isinstance(other, Bool) and not any( ( other.must, other.must_not, other.filter, getattr(other, "minimum_should_match", None), ) ): q.should.extend(other.should) else: q.should.append(other) return q return Bool(should=[self, other]) __ror__ = __or__ @property def _min_should_match(self) -> int: return getattr( self, "minimum_should_match", 0 if not self.should or (self.must or self.filter) else 1, ) def __invert__(self) -> Query: # Because an empty Bool query is treated like # MatchAll the inverse should be MatchNone if not any(chain(self.must, self.filter, self.should, self.must_not)): return MatchNone() negations: List[Query] = [] for q in chain(self.must, self.filter): negations.append(~q) for q in self.must_not: negations.append(q) if self.should and self._min_should_match: negations.append(Bool(must_not=self.should[:])) if len(negations) == 1: return negations[0] return Bool(should=negations) def __and__(self, other: Query) -> Query: q = self._clone() if isinstance(other, Bool): q.must += other.must q.must_not += other.must_not q.filter += other.filter q.should = [] # reset minimum_should_match as it will get calculated below if "minimum_should_match" in q._params: del q._params["minimum_should_match"] for qx in (self, other): min_should_match = qx._min_should_match # TODO: percentages or negative numbers will fail here # for now we report an error if not isinstance(min_should_match, int) or min_should_match < 0: raise ValueError( "Can only combine queries with positive integer values for minimum_should_match" ) # all subqueries are required if len(qx.should) <= min_should_match: q.must.extend(qx.should) # not all of them are required, use it and remember min_should_match elif not q.should: q.minimum_should_match = min_should_match q.should = qx.should # all queries are optional, just extend should elif q._min_should_match == 0 and min_should_match == 0: q.should.extend(qx.should) # not all are required, add a should list to the must with proper min_should_match else: q.must.append( Bool(should=qx.should, minimum_should_match=min_should_match) ) else: if not (q.must or q.filter) and q.should: q._params.setdefault("minimum_should_match", 1) q.must.append(other) return q __rand__ = __and__ class 
class Boosting(Query):
    """
    Returns documents matching a `positive` query while reducing the
    relevance score of documents that also match a `negative` query.

    :arg negative_boost: (required) Floating point number between 0 and
        1.0 used to decrease the relevance scores of documents matching the
        `negative` query.
    :arg negative: (required) Query used to decrease the relevance score
        of matching documents.
    :arg positive: (required) Any returned documents must match this
        query.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases the
        relevance score. A value greater than 1.0 increases the relevance
        score. Defaults to `1` if omitted.
    :arg _name:
    """

    name = "boosting"
    _param_defs = {
        "negative": {"type": "query"},
        "positive": {"type": "query"},
    }

    def __init__(
        self,
        *,
        negative_boost: Union[float, "DefaultType"] = DEFAULT,
        negative: Union[Query, "DefaultType"] = DEFAULT,
        positive: Union[Query, "DefaultType"] = DEFAULT,
        boost: Union[float, "DefaultType"] = DEFAULT,
        _name: Union[str, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            negative_boost=negative_boost,
            negative=negative,
            positive=positive,
            boost=boost,
            _name=_name,
            **kwargs,
        )


class Common(Query):
    """
    :arg _field: The field to use in this query.
    :arg _value: The query value for the field.
    """

    name = "common"

    def __init__(
        self,
        _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        _value: Union[
            "types.CommonTermsQuery", Dict[str, Any], "DefaultType"
        ] = DEFAULT,
        **kwargs: Any,
    ):
        if _field is not DEFAULT:
            kwargs[str(_field)] = _value
        super().__init__(**kwargs)


class CombinedFields(Query):
    """
    The `combined_fields` query supports searching multiple text fields
    as if their contents had been indexed into one combined field.

    :arg fields: (required) List of fields to search. Field wildcard
        patterns are allowed. Only `text` fields are supported, and they
        must all have the same search `analyzer`.
    :arg query: (required) Text to search for in the provided `fields`.
        The `combined_fields` query analyzes the provided text before
        performing a search.
    :arg auto_generate_synonyms_phrase_query: If true, match phrase
        queries are automatically created for multi-term synonyms. Defaults
        to `True` if omitted.
    :arg operator: Boolean logic used to interpret text in the query
        value. Defaults to `or` if omitted.
    :arg minimum_should_match: Minimum number of clauses that must match
        for a document to be returned.
    :arg zero_terms_query: Indicates whether no documents are returned if
        the analyzer removes all tokens, such as when using a `stop`
        filter. Defaults to `none` if omitted.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases the
        relevance score. A value greater than 1.0 increases the relevance
        score. Defaults to `1` if omitted.
:arg _name: """ name = "combined_fields" def __init__( self, *, fields: Union[ Sequence[Union[str, "InstrumentedField"]], "DefaultType" ] = DEFAULT, query: Union[str, "DefaultType"] = DEFAULT, auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, operator: Union[Literal["or", "and"], "DefaultType"] = DEFAULT, minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, zero_terms_query: Union[Literal["none", "all"], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( fields=fields, query=query, auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, operator=operator, minimum_should_match=minimum_should_match, zero_terms_query=zero_terms_query, boost=boost, _name=_name, **kwargs, ) class ConstantScore(Query): """ Wraps a filter query and returns every matching document with a relevance score equal to the `boost` parameter value. :arg filter: (required) Filter query you wish to run. Any returned documents must match this query. Filter queries do not calculate relevance scores. To speed up performance, Elasticsearch automatically caches frequently used filter queries. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "constant_score" _param_defs = { "filter": {"type": "query"}, } def __init__( self, *, filter: Union[Query, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(filter=filter, boost=boost, _name=_name, **kwargs) class DisMax(Query): """ Returns documents matching one or more wrapped queries, called query clauses or clauses. If a returned document matches multiple query clauses, the `dis_max` query assigns the document the highest relevance score from any matching clause, plus a tie breaking increment for any additional matching subqueries. :arg queries: (required) One or more query clauses. Returned documents must match one or more of these queries. If a document matches multiple queries, Elasticsearch uses the highest relevance score. :arg tie_breaker: Floating point number between 0 and 1.0 used to increase the relevance scores of documents matching multiple query clauses. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "dis_max" _param_defs = { "queries": {"type": "query", "multi": True}, } def __init__( self, *, queries: Union[Sequence[Query], "DefaultType"] = DEFAULT, tie_breaker: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( queries=queries, tie_breaker=tie_breaker, boost=boost, _name=_name, **kwargs ) class DistanceFeature(Query): """ Boosts the relevance score of documents closer to a provided origin date or point. For example, you can use this query to give more weight to documents closer to a certain date or location. 
    :arg origin: (required) Date or point of origin used to calculate
        distances. If the `field` value is a `date` or `date_nanos` field,
        the `origin` value must be a date. Date Math, such as `now-1h`, is
        supported. If the field value is a `geo_point` field, the `origin`
        value must be a geopoint.
    :arg pivot: (required) Distance from the `origin` at which relevance
        scores receive half of the `boost` value. If the `field` value is a
        `date` or `date_nanos` field, the `pivot` value must be a time
        unit, such as `1h` or `10d`. If the `field` value is a `geo_point`
        field, the `pivot` value must be a distance unit, such as `1km` or
        `12m`.
    :arg field: (required) Name of the field used to calculate distances.
        This field must meet the following criteria: be a `date`,
        `date_nanos` or `geo_point` field; have an `index` mapping
        parameter value of `true`, which is the default; have a
        `doc_values` mapping parameter value of `true`, which is the
        default.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases the
        relevance score. A value greater than 1.0 increases the relevance
        score. Defaults to `1` if omitted.
    :arg _name:
    """

    name = "distance_feature"

    def __init__(
        self,
        *,
        origin: Any = DEFAULT,
        pivot: Any = DEFAULT,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        boost: Union[float, "DefaultType"] = DEFAULT,
        _name: Union[str, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            origin=origin, pivot=pivot, field=field, boost=boost, _name=_name, **kwargs
        )


class Exists(Query):
    """
    Returns documents that contain an indexed value for a field.

    :arg field: (required) Name of the field you wish to search.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases the
        relevance score. A value greater than 1.0 increases the relevance
        score. Defaults to `1` if omitted.
    :arg _name:
    """

    name = "exists"

    def __init__(
        self,
        *,
        field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT,
        boost: Union[float, "DefaultType"] = DEFAULT,
        _name: Union[str, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(field=field, boost=boost, _name=_name, **kwargs)


class FunctionScore(Query):
    """
    The `function_score` query enables you to modify the score of
    documents that are retrieved by a query.

    :arg boost_mode: Defines how the newly computed score is combined with
        the score of the query. Defaults to `multiply` if omitted.
    :arg functions: One or more functions that compute a new score for
        each document returned by the query.
    :arg max_boost: Restricts the new score to not exceed the provided
        limit.
    :arg min_score: Excludes documents that do not meet the provided score
        threshold.
    :arg query: A query that determines the documents for which a new
        score is computed.
    :arg score_mode: Specifies how the computed scores are combined.
        Defaults to `multiply` if omitted.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases the
        relevance score. A value greater than 1.0 increases the relevance
        score. Defaults to `1` if omitted.
:arg _name: """ name = "function_score" _param_defs = { "functions": {"type": "score_function", "multi": True}, "query": {"type": "query"}, "filter": {"type": "query"}, } def __init__( self, *, boost_mode: Union[ Literal["multiply", "replace", "sum", "avg", "max", "min"], "DefaultType" ] = DEFAULT, functions: Union[Sequence[ScoreFunction], "DefaultType"] = DEFAULT, max_boost: Union[float, "DefaultType"] = DEFAULT, min_score: Union[float, "DefaultType"] = DEFAULT, query: Union[Query, "DefaultType"] = DEFAULT, score_mode: Union[ Literal["multiply", "sum", "avg", "first", "max", "min"], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if functions is DEFAULT: functions = [] for name in ScoreFunction._classes: if name in kwargs: functions.append({name: kwargs.pop(name)}) # type: ignore[arg-type] super().__init__( boost_mode=boost_mode, functions=functions, max_boost=max_boost, min_score=min_score, query=query, score_mode=score_mode, boost=boost, _name=_name, **kwargs, ) class Fuzzy(Query): """ Returns documents that contain terms similar to the search term, as measured by a Levenshtein edit distance. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "fuzzy" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.FuzzyQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class GeoBoundingBox(Query): """ Matches geo_point and geo_shape values that intersect a bounding box. :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg type: :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. Set to `COERCE` to also try to infer correct latitude or longitude. Defaults to `'strict'` if omitted. :arg ignore_unmapped: Set to `true` to ignore an unmapped field and not match any documents for this query. Set to `false` to throw an exception if the field is not mapped. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "geo_bounding_box" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.CoordsGeoBounds", "types.TopLeftBottomRightGeoBounds", "types.TopRightBottomLeftGeoBounds", "types.WktGeoBounds", Dict[str, Any], "DefaultType", ] = DEFAULT, *, type: Union[Literal["memory", "indexed"], "DefaultType"] = DEFAULT, validation_method: Union[ Literal["coerce", "ignore_malformed", "strict"], "DefaultType" ] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__( type=type, validation_method=validation_method, ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs, ) class GeoDistance(Query): """ Matches `geo_point` and `geo_shape` values within a given distance of a geopoint. :arg _field: The field to use in this query. :arg _value: The query value for the field. 
:arg distance: (required) The radius of the circle centred on the specified location. Points which fall into this circle are considered to be matches. :arg distance_type: How to compute the distance. Set to `plane` for a faster calculation that's inaccurate on long distances and close to the poles. Defaults to `'arc'` if omitted. :arg validation_method: Set to `IGNORE_MALFORMED` to accept geo points with invalid latitude or longitude. Set to `COERCE` to also try to infer correct latitude or longitude. Defaults to `'strict'` if omitted. :arg ignore_unmapped: Set to `true` to ignore an unmapped field and not match any documents for this query. Set to `false` to throw an exception if the field is not mapped. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "geo_distance" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.LatLonGeoLocation", "types.GeoHashLocation", Sequence[float], str, Dict[str, Any], "DefaultType", ] = DEFAULT, *, distance: Union[str, "DefaultType"] = DEFAULT, distance_type: Union[Literal["arc", "plane"], "DefaultType"] = DEFAULT, validation_method: Union[ Literal["coerce", "ignore_malformed", "strict"], "DefaultType" ] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__( distance=distance, distance_type=distance_type, validation_method=validation_method, ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs, ) class GeoGrid(Query): """ Matches `geo_point` and `geo_shape` values that intersect a grid cell from a GeoGrid aggregation. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "geo_grid" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.GeoGridQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class GeoPolygon(Query): """ :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg validation_method: Defaults to `'strict'` if omitted. :arg ignore_unmapped: :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "geo_polygon" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.GeoPolygonPoints", Dict[str, Any], "DefaultType" ] = DEFAULT, *, validation_method: Union[ Literal["coerce", "ignore_malformed", "strict"], "DefaultType" ] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__( validation_method=validation_method, ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs, ) class GeoShape(Query): """ Filter documents indexed using either the `geo_shape` or the `geo_point` type. :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg ignore_unmapped: Set to `true` to ignore an unmapped field and not match any documents for this query. Set to `false` to throw an exception if the field is not mapped. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "geo_shape" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.GeoShapeFieldQuery", Dict[str, Any], "DefaultType" ] = DEFAULT, *, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__( ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs ) class HasChild(Query): """ Returns parent documents whose joined child documents match a provided query. :arg query: (required) Query you wish to run on child documents of the `type` field. If a child document matches the search, the query returns the parent document. :arg type: (required) Name of the child relationship mapped for the `join` field. :arg ignore_unmapped: Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. :arg inner_hits: If defined, each search hit will contain inner hits. :arg max_children: Maximum number of child documents that match the query allowed for a returned parent document. If the parent document exceeds this limit, it is excluded from the search results. :arg min_children: Minimum number of child documents that match the query required to match the query for a returned parent document. If the parent document does not meet this limit, it is excluded from the search results. :arg score_mode: Indicates how scores for matching child documents affect the root parent document’s relevance score. Defaults to `'none'` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "has_child" _param_defs = { "query": {"type": "query"}, } def __init__( self, *, query: Union[Query, "DefaultType"] = DEFAULT, type: Union[str, "DefaultType"] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, max_children: Union[int, "DefaultType"] = DEFAULT, min_children: Union[int, "DefaultType"] = DEFAULT, score_mode: Union[ Literal["none", "avg", "sum", "max", "min"], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( query=query, type=type, ignore_unmapped=ignore_unmapped, inner_hits=inner_hits, max_children=max_children, min_children=min_children, score_mode=score_mode, boost=boost, _name=_name, **kwargs, ) class HasParent(Query): """ Returns child documents whose joined parent document matches a provided query. :arg parent_type: (required) Name of the parent relationship mapped for the `join` field. :arg query: (required) Query you wish to run on parent documents of the `parent_type` field. If a parent document matches the search, the query returns its child documents. :arg ignore_unmapped: Indicates whether to ignore an unmapped `parent_type` and not return any documents instead of an error. You can use this parameter to query multiple indices that may not contain the `parent_type`. :arg inner_hits: If defined, each search hit will contain inner hits. :arg score: Indicates whether the relevance score of a matching parent document is aggregated into its child documents. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "has_parent" _param_defs = { "query": {"type": "query"}, } def __init__( self, *, parent_type: Union[str, "DefaultType"] = DEFAULT, query: Union[Query, "DefaultType"] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, score: Union[bool, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( parent_type=parent_type, query=query, ignore_unmapped=ignore_unmapped, inner_hits=inner_hits, score=score, boost=boost, _name=_name, **kwargs, ) class Ids(Query): """ Returns documents based on their IDs. This query uses document IDs stored in the `_id` field. :arg values: An array of document IDs. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "ids" def __init__( self, *, values: Union[str, Sequence[str], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(values=values, boost=boost, _name=_name, **kwargs) class Intervals(Query): """ Returns documents based on the order and proximity of matching terms. :arg _field: The field to use in this query. :arg _value: The query value for the field. 
""" name = "intervals" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.IntervalsQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class Knn(Query): """ Finds the k nearest vectors to a query vector, as measured by a similarity metric. knn query finds nearest vectors through approximate search on indexed dense_vectors. :arg field: (required) The name of the vector field to search against :arg query_vector: The query vector :arg query_vector_builder: The query vector builder. You must provide a query_vector_builder or query_vector, but not both. :arg num_candidates: The number of nearest neighbor candidates to consider per shard :arg k: The final number of nearest neighbors to return as top hits :arg filter: Filters for the kNN search query :arg similarity: The minimum similarity for a vector to be considered a match :arg rescore_vector: Apply oversampling and rescoring to quantized vectors :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "knn" _param_defs = { "filter": {"type": "query", "multi": True}, } def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, query_vector: Union[Sequence[float], "DefaultType"] = DEFAULT, query_vector_builder: Union[ "types.QueryVectorBuilder", Dict[str, Any], "DefaultType" ] = DEFAULT, num_candidates: Union[int, "DefaultType"] = DEFAULT, k: Union[int, "DefaultType"] = DEFAULT, filter: Union[Query, Sequence[Query], "DefaultType"] = DEFAULT, similarity: Union[float, "DefaultType"] = DEFAULT, rescore_vector: Union[ "types.RescoreVector", Dict[str, Any], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, query_vector=query_vector, query_vector_builder=query_vector_builder, num_candidates=num_candidates, k=k, filter=filter, similarity=similarity, rescore_vector=rescore_vector, boost=boost, _name=_name, **kwargs, ) class Match(Query): """ Returns documents that match a provided text, number, date or boolean value. The provided text is analyzed before matching. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "match" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.MatchQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class MatchAll(Query): """ Matches all documents, giving them all a `_score` of 1.0. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "match_all" def __init__( self, *, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(boost=boost, _name=_name, **kwargs) def __add__(self, other: "Query") -> "Query": return other._clone() __and__ = __rand__ = __radd__ = __add__ def __or__(self, other: "Query") -> "MatchAll": return self __ror__ = __or__ def __invert__(self) -> "MatchNone": return MatchNone() EMPTY_QUERY = MatchAll() class MatchBoolPrefix(Query): """ Analyzes its input and constructs a `bool` query from the terms. Each term except the last is used in a `term` query. The last term is used in a prefix query. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "match_bool_prefix" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.MatchBoolPrefixQuery", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class MatchNone(Query): """ Matches no documents. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "match_none" def __init__( self, *, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(boost=boost, _name=_name, **kwargs) def __add__(self, other: "Query") -> "MatchNone": return self __and__ = __rand__ = __radd__ = __add__ def __or__(self, other: "Query") -> "Query": return other._clone() __ror__ = __or__ def __invert__(self) -> MatchAll: return MatchAll() class MatchPhrase(Query): """ Analyzes the text and creates a phrase query out of the analyzed text. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "match_phrase" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.MatchPhraseQuery", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class MatchPhrasePrefix(Query): """ Returns documents that contain the words of a provided text, in the same order as provided. The last term of the provided text is treated as a prefix, matching any words that begin with that term. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "match_phrase_prefix" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.MatchPhrasePrefixQuery", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class MoreLikeThis(Query): """ Returns documents that are "like" a given set of documents. :arg like: (required) Specifies free form text and/or a single or multiple documents for which you want to find similar documents. :arg analyzer: The analyzer that is used to analyze the free form text. Defaults to the analyzer associated with the first field in fields. :arg boost_terms: Each term in the formed query could be further boosted by their tf-idf score. This sets the boost factor to use when using this feature. Defaults to deactivated (0). 
:arg fail_on_unsupported_field: Controls whether the query should fail (throw an exception) if any of the specified fields are not of the supported types (`text` or `keyword`). Defaults to `True` if omitted. :arg fields: A list of fields to fetch and analyze the text from. Defaults to the `index.query.default_field` index setting, which has a default value of `*`. :arg include: Specifies whether the input documents should also be included in the search results returned. :arg max_doc_freq: The maximum document frequency above which the terms are ignored from the input document. :arg max_query_terms: The maximum number of query terms that can be selected. Defaults to `25` if omitted. :arg max_word_length: The maximum word length above which the terms are ignored. Defaults to unbounded (`0`). :arg min_doc_freq: The minimum document frequency below which the terms are ignored from the input document. Defaults to `5` if omitted. :arg minimum_should_match: After the disjunctive query has been formed, this parameter controls the number of terms that must match. :arg min_term_freq: The minimum term frequency below which the terms are ignored from the input document. Defaults to `2` if omitted. :arg min_word_length: The minimum word length below which the terms are ignored. :arg routing: :arg stop_words: An array of stop words. Any word in this set is ignored. :arg unlike: Used in combination with `like` to exclude documents that match a set of terms. :arg version: :arg version_type: Defaults to `'internal'` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "more_like_this" def __init__( self, *, like: Union[ Union[str, "types.LikeDocument"], Sequence[Union[str, "types.LikeDocument"]], Dict[str, Any], "DefaultType", ] = DEFAULT, analyzer: Union[str, "DefaultType"] = DEFAULT, boost_terms: Union[float, "DefaultType"] = DEFAULT, fail_on_unsupported_field: Union[bool, "DefaultType"] = DEFAULT, fields: Union[ Sequence[Union[str, "InstrumentedField"]], "DefaultType" ] = DEFAULT, include: Union[bool, "DefaultType"] = DEFAULT, max_doc_freq: Union[int, "DefaultType"] = DEFAULT, max_query_terms: Union[int, "DefaultType"] = DEFAULT, max_word_length: Union[int, "DefaultType"] = DEFAULT, min_doc_freq: Union[int, "DefaultType"] = DEFAULT, minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, min_term_freq: Union[int, "DefaultType"] = DEFAULT, min_word_length: Union[int, "DefaultType"] = DEFAULT, routing: Union[str, "DefaultType"] = DEFAULT, stop_words: Union[ Literal[ "_arabic_", "_armenian_", "_basque_", "_bengali_", "_brazilian_", "_bulgarian_", "_catalan_", "_cjk_", "_czech_", "_danish_", "_dutch_", "_english_", "_estonian_", "_finnish_", "_french_", "_galician_", "_german_", "_greek_", "_hindi_", "_hungarian_", "_indonesian_", "_irish_", "_italian_", "_latvian_", "_lithuanian_", "_norwegian_", "_persian_", "_portuguese_", "_romanian_", "_russian_", "_serbian_", "_sorani_", "_spanish_", "_swedish_", "_thai_", "_turkish_", "_none_", ], Sequence[str], "DefaultType", ] = DEFAULT, unlike: Union[ Union[str, "types.LikeDocument"], Sequence[Union[str, "types.LikeDocument"]], Dict[str, Any], "DefaultType", ] = DEFAULT, version: Union[int, "DefaultType"] = DEFAULT, version_type: Union[ Literal["internal", "external", "external_gte", "force"], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( like=like, analyzer=analyzer, boost_terms=boost_terms, fail_on_unsupported_field=fail_on_unsupported_field, fields=fields, include=include, max_doc_freq=max_doc_freq, max_query_terms=max_query_terms, max_word_length=max_word_length, min_doc_freq=min_doc_freq, minimum_should_match=minimum_should_match, min_term_freq=min_term_freq, min_word_length=min_word_length, routing=routing, stop_words=stop_words, unlike=unlike, version=version, version_type=version_type, boost=boost, _name=_name, **kwargs, ) class MultiMatch(Query): """ Enables you to search for a provided text, number, date or boolean value across multiple fields. The provided text is analyzed before matching. :arg query: (required) Text, number, boolean value or date you wish to find in the provided field. :arg analyzer: Analyzer used to convert the text in the query value into tokens. :arg auto_generate_synonyms_phrase_query: If `true`, match phrase queries are automatically created for multi-term synonyms. Defaults to `True` if omitted. :arg cutoff_frequency: :arg fields: The fields to be queried. Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. :arg fuzziness: Maximum edit distance allowed for matching. :arg fuzzy_rewrite: Method used to rewrite the query. :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. Defaults to `True` if omitted. :arg lenient: If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. 
    :arg max_expansions: Maximum number of terms to which the query will
        expand. Defaults to `50` if omitted.
    :arg minimum_should_match: Minimum number of clauses that must match
        for a document to be returned.
    :arg operator: Boolean logic used to interpret text in the query
        value. Defaults to `'or'` if omitted.
    :arg prefix_length: Number of beginning characters left unchanged for
        fuzzy matching.
    :arg slop: Maximum number of positions allowed between matching
        tokens.
    :arg tie_breaker: Determines how scores for each per-term blended
        query and scores across groups are combined.
    :arg type: How the `multi_match` query is executed internally.
        Defaults to `'best_fields'` if omitted.
    :arg zero_terms_query: Indicates whether no documents are returned if
        the `analyzer` removes all tokens, such as when using a `stop`
        filter. Defaults to `'none'` if omitted.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases the
        relevance score. A value greater than 1.0 increases the relevance
        score. Defaults to `1` if omitted.
    :arg _name:
    """

    name = "multi_match"

    def __init__(
        self,
        *,
        query: Union[str, "DefaultType"] = DEFAULT,
        analyzer: Union[str, "DefaultType"] = DEFAULT,
        auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT,
        cutoff_frequency: Union[float, "DefaultType"] = DEFAULT,
        fields: Union[
            Union[str, "InstrumentedField"],
            Sequence[Union[str, "InstrumentedField"]],
            "DefaultType",
        ] = DEFAULT,
        fuzziness: Union[str, int, "DefaultType"] = DEFAULT,
        fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT,
        fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT,
        lenient: Union[bool, "DefaultType"] = DEFAULT,
        max_expansions: Union[int, "DefaultType"] = DEFAULT,
        minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT,
        operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT,
        prefix_length: Union[int, "DefaultType"] = DEFAULT,
        slop: Union[int, "DefaultType"] = DEFAULT,
        tie_breaker: Union[float, "DefaultType"] = DEFAULT,
        type: Union[
            Literal[
                "best_fields",
                "most_fields",
                "cross_fields",
                "phrase",
                "phrase_prefix",
                "bool_prefix",
            ],
            "DefaultType",
        ] = DEFAULT,
        zero_terms_query: Union[Literal["all", "none"], "DefaultType"] = DEFAULT,
        boost: Union[float, "DefaultType"] = DEFAULT,
        _name: Union[str, "DefaultType"] = DEFAULT,
        **kwargs: Any,
    ):
        super().__init__(
            query=query,
            analyzer=analyzer,
            auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query,
            cutoff_frequency=cutoff_frequency,
            fields=fields,
            fuzziness=fuzziness,
            fuzzy_rewrite=fuzzy_rewrite,
            fuzzy_transpositions=fuzzy_transpositions,
            lenient=lenient,
            max_expansions=max_expansions,
            minimum_should_match=minimum_should_match,
            operator=operator,
            prefix_length=prefix_length,
            slop=slop,
            tie_breaker=tie_breaker,
            type=type,
            zero_terms_query=zero_terms_query,
            boost=boost,
            _name=_name,
            **kwargs,
        )


class Nested(Query):
    """
    Wraps another query to search nested fields. If an object matches the
    search, the nested query returns the root parent document.

    :arg path: (required) Path to the nested object you wish to search.
    :arg query: (required) Query you wish to run on nested objects in the
        path.
    :arg ignore_unmapped: Indicates whether to ignore an unmapped path and
        not return any documents instead of an error.
    :arg inner_hits: If defined, each search hit will contain inner hits.
    :arg score_mode: How scores for matching child objects affect the root
        parent document’s relevance score.
Defaults to `'avg'` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "nested" _param_defs = { "query": {"type": "query"}, } def __init__( self, *, path: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, query: Union[Query, "DefaultType"] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, inner_hits: Union["types.InnerHits", Dict[str, Any], "DefaultType"] = DEFAULT, score_mode: Union[ Literal["none", "avg", "sum", "max", "min"], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( path=path, query=query, ignore_unmapped=ignore_unmapped, inner_hits=inner_hits, score_mode=score_mode, boost=boost, _name=_name, **kwargs, ) class ParentId(Query): """ Returns child documents joined to a specific parent document. :arg id: ID of the parent document. :arg ignore_unmapped: Indicates whether to ignore an unmapped `type` and not return any documents instead of an error. :arg type: Name of the child relationship mapped for the `join` field. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "parent_id" def __init__( self, *, id: Union[str, "DefaultType"] = DEFAULT, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, type: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( id=id, ignore_unmapped=ignore_unmapped, type=type, boost=boost, _name=_name, **kwargs, ) class Percolate(Query): """ Matches queries stored in an index. :arg field: (required) Field that holds the indexed queries. The field must use the `percolator` mapping type. :arg document: The source of the document being percolated. :arg documents: An array of sources of the documents being percolated. :arg id: The ID of a stored document to percolate. :arg index: The index of a stored document to percolate. :arg name: The suffix used for the `_percolator_document_slot` field when multiple `percolate` queries are specified. :arg preference: Preference used to fetch document to percolate. :arg routing: Routing used to fetch document to percolate. :arg version: The expected version of a stored document to percolate. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "percolate" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, document: Any = DEFAULT, documents: Union[Sequence[Any], "DefaultType"] = DEFAULT, id: Union[str, "DefaultType"] = DEFAULT, index: Union[str, "DefaultType"] = DEFAULT, name: Union[str, "DefaultType"] = DEFAULT, preference: Union[str, "DefaultType"] = DEFAULT, routing: Union[str, "DefaultType"] = DEFAULT, version: Union[int, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, document=document, documents=documents, id=id, index=index, name=name, preference=preference, routing=routing, version=version, boost=boost, _name=_name, **kwargs, ) class Pinned(Query): """ Promotes selected documents to rank higher than those matching a given query. :arg organic: (required) Any choice of query used to rank documents which will be ranked below the "pinned" documents. :arg ids: Document IDs listed in the order they are to appear in results. Required if `docs` is not specified. :arg docs: Documents listed in the order they are to appear in results. Required if `ids` is not specified. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "pinned" _param_defs = { "organic": {"type": "query"}, } def __init__( self, *, organic: Union[Query, "DefaultType"] = DEFAULT, ids: Union[Sequence[str], "DefaultType"] = DEFAULT, docs: Union[ Sequence["types.PinnedDoc"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( organic=organic, ids=ids, docs=docs, boost=boost, _name=_name, **kwargs ) class Prefix(Query): """ Returns documents that contain a specific prefix in a provided field. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "prefix" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.PrefixQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class QueryString(Query): """ Returns documents based on a provided query string, using a parser with a strict syntax. :arg query: (required) Query string you wish to parse and use for search. :arg allow_leading_wildcard: If `true`, the wildcard characters `*` and `?` are allowed as the first character of the query string. Defaults to `True` if omitted. :arg analyzer: Analyzer used to convert text in the query string into tokens. :arg analyze_wildcard: If `true`, the query attempts to analyze wildcard terms in the query string. :arg auto_generate_synonyms_phrase_query: If `true`, match phrase queries are automatically created for multi-term synonyms. Defaults to `True` if omitted. :arg default_field: Default field to search if no field is provided in the query string. Supports wildcards (`*`). Defaults to the `index.query.default_field` index setting, which has a default value of `*`. :arg default_operator: Default boolean logic used to interpret text in the query string if no operators are specified. Defaults to `'or'` if omitted. 
:arg enable_position_increments: If `true`, enable position increments in queries constructed from a `query_string` search. Defaults to `True` if omitted. :arg escape: :arg fields: Array of fields to search. Supports wildcards (`*`). :arg fuzziness: Maximum edit distance allowed for fuzzy matching. :arg fuzzy_max_expansions: Maximum number of terms to which the query expands for fuzzy matching. Defaults to `50` if omitted. :arg fuzzy_prefix_length: Number of beginning characters left unchanged for fuzzy matching. :arg fuzzy_rewrite: Method used to rewrite the query. :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Defaults to `True` if omitted. :arg lenient: If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. :arg max_determinized_states: Maximum number of automaton states required for the query. Defaults to `10000` if omitted. :arg minimum_should_match: Minimum number of clauses that must match for a document to be returned. :arg phrase_slop: Maximum number of positions allowed between matching tokens for phrases. :arg quote_analyzer: Analyzer used to convert quoted text in the query string into tokens. For quoted text, this parameter overrides the analyzer specified in the `analyzer` parameter. :arg quote_field_suffix: Suffix appended to quoted text in the query string. You can use this suffix to use a different analysis method for exact matches. :arg rewrite: Method used to rewrite the query. :arg tie_breaker: How to combine the queries generated from the individual search terms in the resulting `dis_max` query. :arg time_zone: Coordinated Universal Time (UTC) offset or IANA time zone used to convert date values in the query string to UTC. :arg type: Determines how the query matches and scores documents. Defaults to `'best_fields'` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ name = "query_string" def __init__( self, *, query: Union[str, "DefaultType"] = DEFAULT, allow_leading_wildcard: Union[bool, "DefaultType"] = DEFAULT, analyzer: Union[str, "DefaultType"] = DEFAULT, analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT, auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, default_field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, enable_position_increments: Union[bool, "DefaultType"] = DEFAULT, escape: Union[bool, "DefaultType"] = DEFAULT, fields: Union[ Sequence[Union[str, "InstrumentedField"]], "DefaultType" ] = DEFAULT, fuzziness: Union[str, int, "DefaultType"] = DEFAULT, fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT, fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT, fuzzy_rewrite: Union[str, "DefaultType"] = DEFAULT, fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, lenient: Union[bool, "DefaultType"] = DEFAULT, max_determinized_states: Union[int, "DefaultType"] = DEFAULT, minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, phrase_slop: Union[float, "DefaultType"] = DEFAULT, quote_analyzer: Union[str, "DefaultType"] = DEFAULT, quote_field_suffix: Union[str, "DefaultType"] = DEFAULT, rewrite: Union[str, "DefaultType"] = DEFAULT, tie_breaker: Union[float, "DefaultType"] = DEFAULT, time_zone: Union[str, "DefaultType"] = DEFAULT, type: Union[ Literal[ "best_fields", "most_fields", "cross_fields", "phrase", "phrase_prefix", "bool_prefix", ], "DefaultType", ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( query=query, allow_leading_wildcard=allow_leading_wildcard, analyzer=analyzer, analyze_wildcard=analyze_wildcard, auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, default_field=default_field, default_operator=default_operator, enable_position_increments=enable_position_increments, escape=escape, fields=fields, fuzziness=fuzziness, fuzzy_max_expansions=fuzzy_max_expansions, fuzzy_prefix_length=fuzzy_prefix_length, fuzzy_rewrite=fuzzy_rewrite, fuzzy_transpositions=fuzzy_transpositions, lenient=lenient, max_determinized_states=max_determinized_states, minimum_should_match=minimum_should_match, phrase_slop=phrase_slop, quote_analyzer=quote_analyzer, quote_field_suffix=quote_field_suffix, rewrite=rewrite, tie_breaker=tie_breaker, time_zone=time_zone, type=type, boost=boost, _name=_name, **kwargs, ) class Range(Query): """ Returns documents that contain terms within a provided range. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "range" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["wrappers.Range[Any]", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class RankFeature(Query): """ Boosts the relevance score of documents based on the numeric value of a `rank_feature` or `rank_features` field. :arg field: (required) `rank_feature` or `rank_features` field used to boost relevance scores. :arg saturation: Saturation function used to boost relevance scores based on the value of the rank feature `field`. :arg log: Logarithmic function used to boost relevance scores based on the value of the rank feature `field`. 
:arg linear: Linear function used to boost relevance scores based on the value of the rank feature `field`. :arg sigmoid: Sigmoid function used to boost relevance scores based on the value of the rank feature `field`. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "rank_feature" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, saturation: Union[ "types.RankFeatureFunctionSaturation", Dict[str, Any], "DefaultType" ] = DEFAULT, log: Union[ "types.RankFeatureFunctionLogarithm", Dict[str, Any], "DefaultType" ] = DEFAULT, linear: Union[ "types.RankFeatureFunctionLinear", Dict[str, Any], "DefaultType" ] = DEFAULT, sigmoid: Union[ "types.RankFeatureFunctionSigmoid", Dict[str, Any], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, saturation=saturation, log=log, linear=linear, sigmoid=sigmoid, boost=boost, _name=_name, **kwargs, ) class Regexp(Query): """ Returns documents that contain terms matching a regular expression. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "regexp" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.RegexpQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class Rule(Query): """ :arg organic: (required) :arg match_criteria: (required) :arg ruleset_ids: :arg ruleset_id: :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "rule" _param_defs = { "organic": {"type": "query"}, } def __init__( self, *, organic: Union[Query, "DefaultType"] = DEFAULT, match_criteria: Any = DEFAULT, ruleset_ids: Union[str, Sequence[str], "DefaultType"] = DEFAULT, ruleset_id: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( organic=organic, match_criteria=match_criteria, ruleset_ids=ruleset_ids, ruleset_id=ruleset_id, boost=boost, _name=_name, **kwargs, ) class Script(Query): """ Filters documents based on a provided script. The script query is typically used in a filter context. :arg script: (required) Contains a script to run as a query. This script must return a boolean value, `true` or `false`. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
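        Example (an illustrative sketch; the field and parameter names
        are assumptions)::

            from elasticsearch.dsl import Search

            # script queries are typically run in a filter context
            s = Search(index="products").filter(
                "script",
                script={
                    "source": "doc['price'].value < params.limit",
                    "params": {"limit": 20},
                },
            )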
:arg _name: """ name = "script" def __init__( self, *, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(script=script, boost=boost, _name=_name, **kwargs) class ScriptScore(Query): """ Uses a script to provide a custom score for returned documents. :arg query: (required) Query used to return documents. :arg script: (required) Script used to compute the score of documents returned by the query. Important: final relevance scores from the `script_score` query cannot be negative. :arg min_score: Documents with a score lower than this floating point number are excluded from the search results. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "script_score" _param_defs = { "query": {"type": "query"}, } def __init__( self, *, query: Union[Query, "DefaultType"] = DEFAULT, script: Union["types.Script", Dict[str, Any], "DefaultType"] = DEFAULT, min_score: Union[float, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( query=query, script=script, min_score=min_score, boost=boost, _name=_name, **kwargs, ) class Semantic(Query): """ A semantic query to semantic_text field types :arg field: (required) The field to query, which must be a semantic_text field type :arg query: (required) The query text :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "semantic" def __init__( self, *, field: Union[str, "DefaultType"] = DEFAULT, query: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs) class Shape(Query): """ Queries documents that contain fields indexed using the `shape` type. :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg ignore_unmapped: When set to `true` the query ignores an unmapped field and will not match any documents. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
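        Example (an illustrative sketch; ``geometry`` is an assumed field
        name)::

            from elasticsearch.dsl import Search

            s = Search(index="shapes").query(
                "shape",
                geometry={
                    "shape": {
                        "type": "envelope",
                        "coordinates": [[13.0, 53.1], [14.0, 52.1]],
                    },
                    "relation": "within",
                },
            )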
:arg _name: """ name = "shape" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.ShapeFieldQuery", Dict[str, Any], "DefaultType"] = DEFAULT, *, ignore_unmapped: Union[bool, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__( ignore_unmapped=ignore_unmapped, boost=boost, _name=_name, **kwargs ) class SimpleQueryString(Query): """ Returns documents based on a provided query string, using a parser with a limited but fault-tolerant syntax. :arg query: (required) Query string in the simple query string syntax you wish to parse and use for search. :arg analyzer: Analyzer used to convert text in the query string into tokens. :arg analyze_wildcard: If `true`, the query attempts to analyze wildcard terms in the query string. :arg auto_generate_synonyms_phrase_query: If `true`, the parser creates a match_phrase query for each multi-position token. Defaults to `True` if omitted. :arg default_operator: Default boolean logic used to interpret text in the query string if no operators are specified. Defaults to `'or'` if omitted. :arg fields: Array of fields you wish to search. Accepts wildcard expressions. You also can boost relevance scores for matches to particular fields using a caret (`^`) notation. Defaults to the `index.query.default_field index` setting, which has a default value of `*`. :arg flags: List of enabled operators for the simple query string syntax. Defaults to `ALL` if omitted. :arg fuzzy_max_expansions: Maximum number of terms to which the query expands for fuzzy matching. Defaults to `50` if omitted. :arg fuzzy_prefix_length: Number of beginning characters left unchanged for fuzzy matching. :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). :arg lenient: If `true`, format-based errors, such as providing a text value for a numeric field, are ignored. :arg minimum_should_match: Minimum number of clauses that must match for a document to be returned. :arg quote_field_suffix: Suffix appended to quoted text in the query string. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
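        Example (an illustrative sketch; the field names and boosts are
        assumptions)::

            from elasticsearch.dsl import Search

            s = Search(index="blog").query(
                "simple_query_string",
                query='"fried eggs" +(eggplant | potato) -frittata',
                fields=["title^5", "body"],
                default_operator="and",
            )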
:arg _name: """ name = "simple_query_string" def __init__( self, *, query: Union[str, "DefaultType"] = DEFAULT, analyzer: Union[str, "DefaultType"] = DEFAULT, analyze_wildcard: Union[bool, "DefaultType"] = DEFAULT, auto_generate_synonyms_phrase_query: Union[bool, "DefaultType"] = DEFAULT, default_operator: Union[Literal["and", "or"], "DefaultType"] = DEFAULT, fields: Union[ Sequence[Union[str, "InstrumentedField"]], "DefaultType" ] = DEFAULT, flags: Union[ "types.PipeSeparatedFlags", Dict[str, Any], "DefaultType" ] = DEFAULT, fuzzy_max_expansions: Union[int, "DefaultType"] = DEFAULT, fuzzy_prefix_length: Union[int, "DefaultType"] = DEFAULT, fuzzy_transpositions: Union[bool, "DefaultType"] = DEFAULT, lenient: Union[bool, "DefaultType"] = DEFAULT, minimum_should_match: Union[int, str, "DefaultType"] = DEFAULT, quote_field_suffix: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( query=query, analyzer=analyzer, analyze_wildcard=analyze_wildcard, auto_generate_synonyms_phrase_query=auto_generate_synonyms_phrase_query, default_operator=default_operator, fields=fields, flags=flags, fuzzy_max_expansions=fuzzy_max_expansions, fuzzy_prefix_length=fuzzy_prefix_length, fuzzy_transpositions=fuzzy_transpositions, lenient=lenient, minimum_should_match=minimum_should_match, quote_field_suffix=quote_field_suffix, boost=boost, _name=_name, **kwargs, ) class SpanContaining(Query): """ Returns matches which enclose another span query. :arg big: (required) Can be any span query. Matching spans from `big` that contain matches from `little` are returned. :arg little: (required) Can be any span query. Matching spans from `big` that contain matches from `little` are returned. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_containing" def __init__( self, *, big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs) class SpanFieldMasking(Query): """ Wrapper to allow span queries to participate in composite single-field span queries by _lying_ about their search field. :arg field: (required) :arg query: (required) :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_field_masking" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, query: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(field=field, query=query, boost=boost, _name=_name, **kwargs) class SpanFirst(Query): """ Matches spans near the beginning of a field. :arg end: (required) Controls the maximum end position permitted in a match. 
:arg match: (required) Can be any other span type query. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_first" def __init__( self, *, end: Union[int, "DefaultType"] = DEFAULT, match: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(end=end, match=match, boost=boost, _name=_name, **kwargs) class SpanMulti(Query): """ Allows you to wrap a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query) as a `span` query, so it can be nested. :arg match: (required) Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_multi" _param_defs = { "match": {"type": "query"}, } def __init__( self, *, match: Union[Query, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(match=match, boost=boost, _name=_name, **kwargs) class SpanNear(Query): """ Matches spans which are near one another. You can specify `slop`, the maximum number of intervening unmatched positions, as well as whether matches are required to be in-order. :arg clauses: (required) Array of one or more other span type queries. :arg in_order: Controls whether matches are required to be in-order. :arg slop: Controls the maximum number of intervening unmatched positions permitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_near" def __init__( self, *, clauses: Union[ Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, in_order: Union[bool, "DefaultType"] = DEFAULT, slop: Union[int, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( clauses=clauses, in_order=in_order, slop=slop, boost=boost, _name=_name, **kwargs, ) class SpanNot(Query): """ Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens after (controlled by the parameter `post`) another span query. :arg exclude: (required) Span query whose matches must not overlap those returned. :arg include: (required) Span query whose matches are filtered. :arg dist: The number of tokens from within the include span that can’t have overlap with the exclude span. Equivalent to setting both `pre` and `post`. :arg post: The number of tokens after the include span that can’t have overlap with the exclude span. :arg pre: The number of tokens before the include span that can’t have overlap with the exclude span. 
:arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_not" def __init__( self, *, exclude: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, include: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, dist: Union[int, "DefaultType"] = DEFAULT, post: Union[int, "DefaultType"] = DEFAULT, pre: Union[int, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( exclude=exclude, include=include, dist=dist, post=post, pre=pre, boost=boost, _name=_name, **kwargs, ) class SpanOr(Query): """ Matches the union of its span clauses. :arg clauses: (required) Array of one or more other span type queries. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_or" def __init__( self, *, clauses: Union[ Sequence["types.SpanQuery"], Sequence[Dict[str, Any]], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(clauses=clauses, boost=boost, _name=_name, **kwargs) class SpanTerm(Query): """ Matches spans containing a term. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "span_term" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.SpanTermQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class SpanWithin(Query): """ Returns matches which are enclosed inside another span query. :arg big: (required) Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. :arg little: (required) Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "span_within" def __init__( self, *, big: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, little: Union["types.SpanQuery", Dict[str, Any], "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(big=big, little=little, boost=boost, _name=_name, **kwargs) class SparseVector(Query): """ Using input query vectors or a natural language processing model to convert a query into a list of token-weight pairs, queries against a sparse vector field. :arg field: (required) The name of the field that contains the token- weight pairs to be searched against. This field must be a mapped sparse_vector field. :arg query_vector: Dictionary of precomputed sparse vectors and their associated weights. 
Only one of inference_id or query_vector may be supplied in a request. :arg inference_id: The inference ID to use to convert the query text into token-weight pairs. It must be the same inference ID that was used to create the tokens from the input text. Only one of inference_id and query_vector is allowed. If inference_id is specified, query must also be specified. Only one of inference_id or query_vector may be supplied in a request. :arg query: The query text you want to use for search. If inference_id is specified, query must also be specified. :arg prune: Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. Default: false :arg pruning_config: Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. This is only used if prune is set to true. If prune is set to true but pruning_config is not specified, default values will be used. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "sparse_vector" def __init__( self, *, field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, query_vector: Union[Mapping[str, float], "DefaultType"] = DEFAULT, inference_id: Union[str, "DefaultType"] = DEFAULT, query: Union[str, "DefaultType"] = DEFAULT, prune: Union[bool, "DefaultType"] = DEFAULT, pruning_config: Union[ "types.TokenPruningConfig", Dict[str, Any], "DefaultType" ] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__( field=field, query_vector=query_vector, inference_id=inference_id, query=query, prune=prune, pruning_config=pruning_config, boost=boost, _name=_name, **kwargs, ) class Term(Query): """ Returns documents that contain an exact term in a provided field. To return a document, the query term must exactly match the queried field's value, including whitespace and capitalization. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "term" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.TermQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class Terms(Query): """ Returns documents that contain one or more exact terms in a provided field. To return a document, one or more terms must exactly match a field value, including whitespace and capitalization. :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
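        Example (an illustrative sketch; the field and values are
        assumptions)::

            from elasticsearch.dsl import Search

            s = Search(index="blog").query("terms", tags=["python", "search"])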
:arg _name: """ name = "terms" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ Sequence[Union[int, float, str, bool, None]], "types.TermsLookup", Dict[str, Any], "DefaultType", ] = DEFAULT, *, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(boost=boost, _name=_name, **kwargs) def _setattr(self, name: str, value: Any) -> None: # here we convert any iterables that are not strings to lists if hasattr(value, "__iter__") and not isinstance(value, (str, list, dict)): value = list(value) super()._setattr(name, value) class TermsSet(Query): """ Returns documents that contain a minimum number of exact terms in a provided field. To return a document, a required number of terms must exactly match the field values, including whitespace and capitalization. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "terms_set" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.TermsSetQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class TextExpansion(Query): """ Uses a natural language processing model to convert the query text into a list of token-weight pairs which are then used in a query against a sparse vector or rank features field. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "text_expansion" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.TextExpansionQuery", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class WeightedTokens(Query): """ Supports returning text_expansion query results by sending in precomputed tokens with the query. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "weighted_tokens" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ "types.WeightedTokensQuery", Dict[str, Any], "DefaultType" ] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class Wildcard(Query): """ Returns documents that contain terms matching a wildcard pattern. :arg _field: The field to use in this query. :arg _value: The query value for the field. """ name = "wildcard" def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["types.WildcardQuery", Dict[str, Any], "DefaultType"] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value super().__init__(**kwargs) class Wrapper(Query): """ A query that accepts any other query as base64 encoded string. :arg query: (required) A base64 encoded query. The binary data format can be any of JSON, YAML, CBOR or SMILE encodings :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
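        Example (an illustrative sketch of the base64 round trip; the
        wrapped term query is arbitrary)::

            import base64

            from elasticsearch.dsl import Search

            raw = b'{"term": {"user.id": "kimchy"}}'
            s = Search().query("wrapper", query=base64.b64encode(raw).decode("ascii"))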
:arg _name: """ name = "wrapper" def __init__( self, *, query: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(query=query, boost=boost, _name=_name, **kwargs) class Type(Query): """ :arg value: (required) :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ name = "type" def __init__( self, *, value: Union[str, "DefaultType"] = DEFAULT, boost: Union[float, "DefaultType"] = DEFAULT, _name: Union[str, "DefaultType"] = DEFAULT, **kwargs: Any, ): super().__init__(value=value, boost=boost, _name=_name, **kwargs) python-elasticsearch-9.1.1/elasticsearch/dsl/response/000077500000000000000000000000001506101734100231035ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/dsl/response/__init__.py000066400000000000000000000327161506101734100252250ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import ( TYPE_CHECKING, Any, Dict, Generic, Iterator, List, Mapping, Optional, Sequence, Tuple, Union, cast, ) from ..utils import _R, AttrDict, AttrList, _wrap from .hit import Hit, HitMeta if TYPE_CHECKING: from .. import types from ..aggs import Agg from ..faceted_search_base import FacetedSearchBase from ..search_base import Request, SearchBase from ..update_by_query_base import UpdateByQueryBase __all__ = [ "Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta", "AggregateResponseType", ] class Response(AttrDict[Any], Generic[_R]): """An Elasticsearch search response. :arg took: (required) The number of milliseconds it took Elasticsearch to run the request. This value is calculated by measuring the time elapsed between receipt of a request on the coordinating node and the time at which the coordinating node is ready to send the response. It includes: * Communication time between the coordinating node and data nodes * Time the request spends in the search thread pool, queued for execution * Actual run time It does not include: * Time needed to send the request to Elasticsearch * Time needed to serialize the JSON response * Time needed to send the response to a client :arg timed_out: (required) If `true`, the request timed out before completion; returned results may be partial or empty. :arg _shards: (required) A count of shards used for the request. 
:arg hits: search results :arg aggregations: aggregation results :arg _clusters: :arg fields: :arg max_score: :arg num_reduce_phases: :arg profile: :arg pit_id: :arg _scroll_id: The identifier for the search and its search context. You can use this scroll ID with the scroll API to retrieve the next batch of search results for the request. This property is returned only if the `scroll` query parameter is specified in the request. :arg suggest: :arg terminated_early: """ _search: "SearchBase[_R]" _faceted_search: "FacetedSearchBase[_R]" _doc_class: Optional[_R] _hits: List[_R] took: int timed_out: bool _shards: "types.ShardStatistics" _clusters: "types.ClusterStatistics" fields: Mapping[str, Any] max_score: float num_reduce_phases: int profile: "types.Profile" pit_id: str _scroll_id: str suggest: Mapping[ str, Sequence[ Union["types.CompletionSuggest", "types.PhraseSuggest", "types.TermSuggest"] ], ] terminated_early: bool def __init__( self, search: "Request[_R]", response: Dict[str, Any], doc_class: Optional[_R] = None, ): super(AttrDict, self).__setattr__("_search", search) super(AttrDict, self).__setattr__("_doc_class", doc_class) super().__init__(response) def __iter__(self) -> Iterator[_R]: # type: ignore[override] return iter(self.hits) def __getitem__(self, key: Union[slice, int, str]) -> Any: if isinstance(key, (slice, int)): # for slicing etc return self.hits[key] return super().__getitem__(key) def __nonzero__(self) -> bool: return bool(self.hits) __bool__ = __nonzero__ def __repr__(self) -> str: return "<Response: %r>" % (self.hits or self.aggregations) def __len__(self) -> int: return len(self.hits) def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]: # type: ignore[override] return self._d_, self._search, self._doc_class def __setstate__( self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]] # type: ignore[override] ) -> None: super(AttrDict, self).__setattr__("_d_", state[0]) super(AttrDict, self).__setattr__("_search", state[1]) super(AttrDict, self).__setattr__("_doc_class", state[2]) def success(self) -> bool: return self._shards.total == self._shards.successful and not self.timed_out @property def hits(self) -> List[_R]: if not hasattr(self, "_hits"): h = cast(AttrDict[Any], self._d_["hits"]) try: hits = AttrList(list(map(self._search._get_result, h["hits"]))) except AttributeError as e: # avoid raising AttributeError since it will be hidden by the property raise TypeError("Could not parse hits.", e) # avoid assigning _hits into self._d_ super(AttrDict, self).__setattr__("_hits", hits) for k in h: setattr(self._hits, k, _wrap(h[k])) return self._hits @property def aggregations(self) -> "AggResponse[_R]": return self.aggs @property def aggs(self) -> "AggResponse[_R]": if not hasattr(self, "_aggs"): aggs = AggResponse[_R]( cast("Agg[_R]", self._search.aggs), self._search, cast(Dict[str, Any], self._d_.get("aggregations", {})), ) # avoid assigning _aggs into self._d_ super(AttrDict, self).__setattr__("_aggs", aggs) return cast("AggResponse[_R]", self._aggs) def search_after(self) -> "SearchBase[_R]": """ Return a ``Search`` instance that retrieves the next page of results. This method provides an easy way to paginate a long list of results using the ``search_after`` option.
For example:: page_size = 20 s = Search()[:page_size].sort("date") while True: # get a page of results r = await s.execute() # do something with this page of results # exit the loop if we reached the end if len(r.hits) < page_size: break # get a search object with the next page of results s = r.search_after() Note that the ``search_after`` option requires the search to have an explicit ``sort`` order. """ if len(self.hits) == 0: raise ValueError("Cannot use search_after when there are no search results") if not hasattr(self.hits[-1].meta, "sort"): # type: ignore[attr-defined] raise ValueError("Cannot use search_after when results are not sorted") return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore[attr-defined] AggregateResponseType = Union[ "types.CardinalityAggregate", "types.HdrPercentilesAggregate", "types.HdrPercentileRanksAggregate", "types.TDigestPercentilesAggregate", "types.TDigestPercentileRanksAggregate", "types.PercentilesBucketAggregate", "types.MedianAbsoluteDeviationAggregate", "types.MinAggregate", "types.MaxAggregate", "types.SumAggregate", "types.AvgAggregate", "types.WeightedAvgAggregate", "types.ValueCountAggregate", "types.SimpleValueAggregate", "types.DerivativeAggregate", "types.BucketMetricValueAggregate", "types.StatsAggregate", "types.StatsBucketAggregate", "types.ExtendedStatsAggregate", "types.ExtendedStatsBucketAggregate", "types.GeoBoundsAggregate", "types.GeoCentroidAggregate", "types.HistogramAggregate", "types.DateHistogramAggregate", "types.AutoDateHistogramAggregate", "types.VariableWidthHistogramAggregate", "types.StringTermsAggregate", "types.LongTermsAggregate", "types.DoubleTermsAggregate", "types.UnmappedTermsAggregate", "types.LongRareTermsAggregate", "types.StringRareTermsAggregate", "types.UnmappedRareTermsAggregate", "types.MultiTermsAggregate", "types.MissingAggregate", "types.NestedAggregate", "types.ReverseNestedAggregate", "types.GlobalAggregate", "types.FilterAggregate", "types.ChildrenAggregate", "types.ParentAggregate", "types.SamplerAggregate", "types.UnmappedSamplerAggregate", "types.GeoHashGridAggregate", "types.GeoTileGridAggregate", "types.GeoHexGridAggregate", "types.RangeAggregate", "types.DateRangeAggregate", "types.GeoDistanceAggregate", "types.IpRangeAggregate", "types.IpPrefixAggregate", "types.FiltersAggregate", "types.AdjacencyMatrixAggregate", "types.SignificantLongTermsAggregate", "types.SignificantStringTermsAggregate", "types.UnmappedSignificantTermsAggregate", "types.CompositeAggregate", "types.FrequentItemSetsAggregate", "types.TimeSeriesAggregate", "types.ScriptedMetricAggregate", "types.TopHitsAggregate", "types.InferenceAggregate", "types.StringStatsAggregate", "types.BoxPlotAggregate", "types.TopMetricsAggregate", "types.TTestAggregate", "types.RateAggregate", "types.CumulativeCardinalityAggregate", "types.MatrixStatsAggregate", "types.GeoLineAggregate", ] class AggResponse(AttrDict[Any], Generic[_R]): """An Elasticsearch aggregation response.""" _meta: Dict[str, Any] def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]): super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs}) super().__init__(data) def __getitem__(self, attr_name: str) -> AggregateResponseType: if attr_name in self._meta["aggs"]: # don't do self._meta['aggs'][attr_name] to avoid copying agg = self._meta["aggs"].aggs[attr_name] return cast( AggregateResponseType, agg.result(self._meta["search"], self._d_[attr_name]), ) return super().__getitem__(attr_name) # type: 
ignore[no-any-return] def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override] for name in self._meta["aggs"]: yield self[name] class UpdateByQueryResponse(AttrDict[Any], Generic[_R]): """An Elasticsearch update by query response. :arg batches: The number of scroll responses pulled back by the update by query. :arg failures: Array of failures if there were any unrecoverable errors during the process. If this is non-empty then the request ended because of those failures. Update by query is implemented using batches. Any failure causes the entire process to end, but all failures in the current batch are collected into the array. You can use the `conflicts` option to prevent reindex from ending when version conflicts occur. :arg noops: The number of documents that were ignored because the script used for the update by query returned a noop value for `ctx.op`. :arg deleted: The number of documents that were successfully deleted. :arg requests_per_second: The number of requests per second effectively run during the update by query. :arg retries: The number of retries attempted by update by query. `bulk` is the number of bulk actions retried. `search` is the number of search actions retried. :arg task: :arg timed_out: If true, some requests timed out during the update by query. :arg took: The number of milliseconds from start to end of the whole operation. :arg total: The number of documents that were successfully processed. :arg updated: The number of documents that were successfully updated. :arg version_conflicts: The number of version conflicts that the update by query hit. :arg throttled: :arg throttled_millis: The number of milliseconds the request slept to conform to `requests_per_second`. :arg throttled_until: :arg throttled_until_millis: This field should always be equal to zero in an _update_by_query response. It only has meaning when using the task API, where it indicates the next time (in milliseconds since epoch) a throttled request will be run again in order to conform to `requests_per_second`. """ _search: "UpdateByQueryBase[_R]" batches: int failures: Sequence["types.BulkIndexByScrollFailure"] noops: int deleted: int requests_per_second: float retries: "types.Retries" task: str timed_out: bool took: Any total: int updated: int version_conflicts: int throttled: Any throttled_millis: Any throttled_until: Any throttled_until_millis: Any def __init__( self, search: "Request[_R]", response: Dict[str, Any], doc_class: Optional[_R] = None, ): super(AttrDict, self).__setattr__("_search", search) super(AttrDict, self).__setattr__("_doc_class", doc_class) super().__init__(response) def success(self) -> bool: return not self.timed_out and not self.failures python-elasticsearch-9.1.1/elasticsearch/dsl/response/aggs.py000066400000000000000000000064501506101734100244030ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union, cast from ..utils import _R, AttrDict, AttrList from . import AggResponse, Response if TYPE_CHECKING: from ..aggs import Agg from ..field import Field from ..search_base import SearchBase class Bucket(AggResponse[_R]): def __init__( self, aggs: "Agg[_R]", search: "SearchBase[_R]", data: Dict[str, Any], field: Optional["Field"] = None, ): super().__init__(aggs, search, data) class FieldBucket(Bucket[_R]): def __init__( self, aggs: "Agg[_R]", search: "SearchBase[_R]", data: Dict[str, Any], field: Optional["Field"] = None, ): if field: data["key"] = field.deserialize(data["key"]) super().__init__(aggs, search, data, field) class BucketData(AggResponse[_R]): _bucket_class = Bucket _buckets: Union[AttrDict[Any], AttrList[Any]] def _wrap_bucket(self, data: Dict[str, Any]) -> Bucket[_R]: return self._bucket_class( self._meta["aggs"], self._meta["search"], data, field=self._meta.get("field"), ) def __iter__(self) -> Iterator["Agg"]: # type: ignore[override] return iter(self.buckets) def __len__(self) -> int: return len(self.buckets) def __getitem__(self, key: Any) -> Any: if isinstance(key, (int, slice)): return cast(AttrList[Any], self.buckets)[key] return super().__getitem__(key) @property def buckets(self) -> Union[AttrDict[Any], AttrList[Any]]: if not hasattr(self, "_buckets"): field = getattr(self._meta["aggs"], "field", None) if field: self._meta["field"] = self._meta["search"]._resolve_field(field) bs = cast(Union[Dict[str, Any], List[Any]], self._d_["buckets"]) if isinstance(bs, list): ret = AttrList(bs, obj_wrapper=self._wrap_bucket) else: ret = AttrDict[Any]({k: self._wrap_bucket(bs[k]) for k in bs}) # type: ignore[assignment] super(AttrDict, self).__setattr__("_buckets", ret) return self._buckets class FieldBucketData(BucketData[_R]): _bucket_class = FieldBucket class TopHitsData(Response[_R]): def __init__(self, agg: "Agg[_R]", search: "SearchBase[_R]", data: Any): super(AttrDict, self).__setattr__( "meta", AttrDict({"agg": agg, "search": search}) ) super().__init__(search, data) python-elasticsearch-9.1.1/elasticsearch/dsl/response/hit.py000066400000000000000000000040771506101734100242510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
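# A minimal sketch of the wrapping performed by ``Hit`` below; the raw hit
# dict is illustrative (values under "fields", when present, are merged over
# "_source"):
#
#     h = Hit({"_index": "blog", "_id": "1", "_source": {"title": "Hello"}})
#     h.title        # -> "Hello"; source keys become attributes
#     h.meta.index   # -> "blog"; hit metadata is exposed under ``meta``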
from typing import Any, Dict, List, Tuple, cast from ..utils import AttrDict, HitMeta class Hit(AttrDict[Any]): def __init__(self, document: Dict[str, Any]): data: Dict[str, Any] = {} if "_source" in document: data = cast(Dict[str, Any], document["_source"]) if "fields" in document: data.update(cast(Dict[str, Any], document["fields"])) super().__init__(data) # assign meta as attribute and not as key in self._d_ super(AttrDict, self).__setattr__("meta", HitMeta(document)) def __getstate__(self) -> Tuple[Dict[str, Any], HitMeta]: # type: ignore[override] # add self.meta since it is not in self.__dict__ return super().__getstate__() + (self.meta,) def __setstate__(self, state: Tuple[Dict[str, Any], HitMeta]) -> None: # type: ignore[override] super(AttrDict, self).__setattr__("meta", state[-1]) super().__setstate__(state[:-1]) def __dir__(self) -> List[str]: # be sure to expose meta in dir(self) return super().__dir__() + ["meta"] def __repr__(self) -> str: return "<Hit({}): {}>".format( "/".join( getattr(self.meta, key) for key in ("index", "id") if key in self.meta ), super().__repr__(), ) python-elasticsearch-9.1.1/elasticsearch/dsl/search.py000066400000000000000000000017371506101734100230720ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from ._async.search import AsyncEmptySearch, AsyncMultiSearch, AsyncSearch # noqa: F401 from ._sync.search import EmptySearch, MultiSearch, Search # noqa: F401 from .search_base import Q # noqa: F401 python-elasticsearch-9.1.1/elasticsearch/dsl/search_base.py000066400000000000000000001042231506101734100240600ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
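# A minimal sketch tying the exports above together; the index and field
# names are illustrative:
#
#     from elasticsearch.dsl import Q, Search
#
#     s = Search(index="blog").query(Q("match", title="python"))
#     response = s.execute()
#     for hit in response:
#         print(hit.meta.score, hit.title)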
import collections.abc import copy from typing import ( TYPE_CHECKING, Any, Callable, Dict, Generic, Iterator, List, Optional, Protocol, Tuple, Type, Union, cast, overload, ) from typing_extensions import Self, TypeVar from .aggs import A, Agg, AggBase from .document_base import InstrumentedField from .exceptions import IllegalOperation from .query import Bool, Q, Query from .response import Hit, Response from .utils import _R, AnyUsingType, AttrDict, DslBase, recursive_to_dict if TYPE_CHECKING: from .field import Field, Object class SupportsClone(Protocol): def _clone(self) -> Self: ... _S = TypeVar("_S", bound=SupportsClone) class QueryProxy(Generic[_S]): """ Simple proxy around DSL objects (queries) that can be called (to add query/post_filter) and also allows attribute access which is proxied to the wrapped query. """ def __init__(self, search: _S, attr_name: str): self._search = search self._proxied: Optional[Query] = None self._attr_name = attr_name def __nonzero__(self) -> bool: return self._proxied is not None __bool__ = __nonzero__ def __call__(self, *args: Any, **kwargs: Any) -> _S: """ Add a query. """ s = self._search._clone() # we cannot use self._proxied since we just cloned self._search and # need to access the new self on the clone proxied = getattr(s, self._attr_name) if proxied._proxied is None: proxied._proxied = Q(*args, **kwargs) else: proxied._proxied &= Q(*args, **kwargs) # always return search to be chainable return s def __getattr__(self, attr_name: str) -> Any: return getattr(self._proxied, attr_name) def __setattr__(self, attr_name: str, value: Any) -> None: if not attr_name.startswith("_"): if self._proxied is not None: self._proxied = Q(self._proxied.to_dict()) setattr(self._proxied, attr_name, value) super().__setattr__(attr_name, value) def __getstate__(self) -> Tuple[_S, Optional[Query], str]: return self._search, self._proxied, self._attr_name def __setstate__(self, state: Tuple[_S, Optional[Query], str]) -> None: self._search, self._proxied, self._attr_name = state class ProxyDescriptor(Generic[_S]): """ Simple descriptor to enable setting of queries and filters as: s = Search() s.query = Q(...) 
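        s.post_filter = Q(...)  # ``post_filter`` is exposed through the same descriptor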
""" def __init__(self, name: str): self._attr_name = f"_{name}_proxy" def __get__(self, instance: Any, owner: object) -> QueryProxy[_S]: return cast(QueryProxy[_S], getattr(instance, self._attr_name)) def __set__(self, instance: _S, value: Dict[str, Any]) -> None: proxy: QueryProxy[_S] = getattr(instance, self._attr_name) proxy._proxied = Q(value) class AggsProxy(AggBase[_R], DslBase): name = "aggs" def __init__(self, search: "SearchBase[_R]"): self._base = cast("Agg[_R]", self) self._search = search self._params = {"aggs": {}} def to_dict(self) -> Dict[str, Any]: return cast(Dict[str, Any], super().to_dict().get("aggs", {})) class Request(Generic[_R]): def __init__( self, using: AnyUsingType = "default", index: Optional[Union[str, List[str]]] = None, doc_type: Optional[ Union[type, str, List[Union[type, str]], Dict[str, Union[type, str]]] ] = None, extra: Optional[Dict[str, Any]] = None, ): self._using = using self._index = None if isinstance(index, (tuple, list)): self._index = list(index) elif index: self._index = [index] self._doc_type: List[Union[type, str]] = [] self._doc_type_map: Dict[str, Any] = {} if isinstance(doc_type, (tuple, list)): self._doc_type.extend(doc_type) elif isinstance(doc_type, collections.abc.Mapping): self._doc_type.extend(doc_type.keys()) self._doc_type_map.update(doc_type) elif doc_type: self._doc_type.append(doc_type) self._params: Dict[str, Any] = {} self._extra: Dict[str, Any] = extra or {} def __eq__(self, other: Any) -> bool: return ( isinstance(other, Request) and other._params == self._params and other._index == self._index and other._doc_type == self._doc_type and other.to_dict() == self.to_dict() ) def __copy__(self) -> Self: return self._clone() def params(self, **kwargs: Any) -> Self: """ Specify query params to be used when executing the search. All the keyword arguments will override the current values. See https://elasticsearch-py.readthedocs.io/en/latest/api/elasticsearch.html#elasticsearch.Elasticsearch.search for all available parameters. Example:: s = Search() s = s.params(routing='user-1', preference='local') """ s = self._clone() s._params.update(kwargs) return s def index(self, *index: Union[str, List[str], Tuple[str, ...]]) -> Self: """ Set the index for the search. If called empty it will remove all information. 
Example:: s = Search() s = s.index('twitter-2015.01.01', 'twitter-2015.01.02') s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02']) """ # .index() resets s = self._clone() if not index: s._index = None else: indexes = [] for i in index: if isinstance(i, str): indexes.append(i) elif isinstance(i, list): indexes += i elif isinstance(i, tuple): indexes += list(i) s._index = (self._index or []) + indexes return s def _resolve_field(self, path: str) -> Optional["Field"]: for dt in self._doc_type: if not hasattr(dt, "_index"): continue field = dt._index.resolve_field(path) if field is not None: return cast("Field", field) return None def _resolve_nested( self, hit: AttrDict[Any], parent_class: Optional[type] = None ) -> Type[_R]: doc_class = Hit nested_path = [] nesting = hit["_nested"] while nesting and "field" in nesting: nested_path.append(nesting["field"]) nesting = nesting.get("_nested") nested_path_str = ".".join(nested_path) nested_field: Optional["Object"] if parent_class is not None and hasattr(parent_class, "_index"): nested_field = cast( Optional["Object"], parent_class._index.resolve_field(nested_path_str) ) else: nested_field = cast( Optional["Object"], self._resolve_field(nested_path_str) ) if nested_field is not None: return cast(Type[_R], nested_field._doc_class) return cast(Type[_R], doc_class) def _get_result( self, hit: AttrDict[Any], parent_class: Optional[type] = None ) -> _R: doc_class: Any = Hit dt = hit.get("_type") if "_nested" in hit: doc_class = self._resolve_nested(hit, parent_class) elif dt in self._doc_type_map: doc_class = self._doc_type_map[dt] else: for doc_type in self._doc_type: if hasattr(doc_type, "_matches") and doc_type._matches(hit): doc_class = doc_type break for t in hit.get("inner_hits", ()): hit["inner_hits"][t] = Response[_R]( self, hit["inner_hits"][t], doc_class=doc_class ) callback = getattr(doc_class, "from_es", doc_class) return cast(_R, callback(hit)) def doc_type( self, *doc_type: Union[type, str], **kwargs: Callable[[AttrDict[Any]], Any] ) -> Self: """ Set the type to search through. You can supply a single value or multiple. Values can be strings or subclasses of ``Document``. You can also pass in any keyword arguments, mapping a doc_type to a callback that should be used instead of the Hit class. If no doc_type is supplied any information stored on the instance will be erased. Example: s = Search().doc_type('product', 'store', User, custom=my_callback) """ # .doc_type() resets s = self._clone() if not doc_type and not kwargs: s._doc_type = [] s._doc_type_map = {} else: s._doc_type.extend(doc_type) s._doc_type.extend(kwargs.keys()) s._doc_type_map.update(kwargs) return s def using(self, client: AnyUsingType) -> Self: """ Associate the search request with an elasticsearch client. A fresh copy will be returned with current instance remaining unchanged. :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or an alias to look up in ``elasticsearch.dsl.connections`` """ s = self._clone() s._using = client return s def extra(self, **kwargs: Any) -> Self: """ Add extra keys to the request body. Mostly here for backwards compatibility. """ s = self._clone() if "from_" in kwargs: kwargs["from"] = kwargs.pop("from_") s._extra.update(kwargs) return s def _clone(self) -> Self: s = self.__class__( using=self._using, index=self._index, doc_type=self._doc_type ) s._doc_type_map = self._doc_type_map.copy() s._extra = self._extra.copy() s._params = self._params.copy() return s if TYPE_CHECKING: def to_dict(self) -> Dict[str, Any]: ... 
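# A minimal sketch of how the chainable ``Request`` methods above compose;
# the index name and parameter values are illustrative:
#
#     s = Search(index="blog-posts")
#     s = s.params(routing="user-1")     # per-request query parameters
#     s = s.extra(from_=0, size=10)      # raw body keys ("from_" -> "from")
#     s2 = s.using("other-cluster")      # every call returns a fresh clone
#     assert s is not s2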
class SearchBase(Request[_R]): query = ProxyDescriptor[Self]("query") post_filter = ProxyDescriptor[Self]("post_filter") _response: Response[_R] def __init__( self, using: AnyUsingType = "default", index: Optional[Union[str, List[str]]] = None, **kwargs: Any, ): """ Search request to elasticsearch. :arg using: `Elasticsearch` instance to use :arg index: limit the search to index All the parameters supplied (or omitted) at creation time can be later overridden by methods (`using`, `index` and `doc_type` respectively). """ super().__init__(using=using, index=index, **kwargs) self.aggs = AggsProxy[_R](self) self._sort: List[Union[str, Dict[str, Dict[str, str]]]] = [] self._knn: List[Dict[str, Any]] = [] self._rank: Dict[str, Any] = {} self._collapse: Dict[str, Any] = {} self._source: Optional[Union[bool, List[str], Dict[str, List[str]]]] = None self._highlight: Dict[str, Any] = {} self._highlight_opts: Dict[str, Any] = {} self._suggest: Dict[str, Any] = {} self._script_fields: Dict[str, Any] = {} self._response_class = Response[_R] self._query_proxy = QueryProxy(self, "query") self._post_filter_proxy = QueryProxy(self, "post_filter") def filter(self, *args: Any, **kwargs: Any) -> Self: """ Add a query in filter context. """ return self.query(Bool(filter=[Q(*args, **kwargs)])) def exclude(self, *args: Any, **kwargs: Any) -> Self: """ Add a negative query in filter context. """ return self.query(Bool(filter=[~Q(*args, **kwargs)])) def __getitem__(self, n: Union[int, slice]) -> Self: """ Support slicing the `Search` instance for pagination. Slicing equates to the from/size parameters. E.g.:: s = Search().query(...)[0:25] is equivalent to:: s = Search().query(...).extra(from_=0, size=25) """ s = self._clone() if isinstance(n, slice): # If negative slicing, abort. if n.start and n.start < 0 or n.stop and n.stop < 0: raise ValueError("Search does not support negative slicing.") slice_start = n.start slice_stop = n.stop else: # This is an index lookup, equivalent to slicing by [n:n+1]. # If negative index, abort. if n < 0: raise ValueError("Search does not support negative indexing.") slice_start = n slice_stop = n + 1 old_from = s._extra.get("from") old_to = None if "size" in s._extra: old_to = (old_from or 0) + s._extra["size"] new_from = old_from if slice_start is not None: new_from = (old_from or 0) + slice_start new_to = old_to if slice_stop is not None: new_to = (old_from or 0) + slice_stop if old_to is not None and old_to < new_to: new_to = old_to if new_from is not None: s._extra["from"] = new_from if new_to is not None: s._extra["size"] = max(0, new_to - (new_from or 0)) return s @classmethod def from_dict(cls, d: Dict[str, Any]) -> Self: """ Construct a new `Search` instance from a raw dict containing the search body. Useful when migrating from raw dictionaries. Example:: s = Search.from_dict({ "query": { "bool": { "must": [...] } }, "aggs": {...} }) s = s.filter('term', published=True) """ s = cls() s.update_from_dict(d) return s def _clone(self) -> Self: """ Return a clone of the current search request. Performs a shallow copy of all the underlying objects. Used internally by most state modifying APIs.
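        For example (illustrative), the copy-on-write chaining style relies on
        this method::

            base = Search().filter("term", published=True)
            variant = base.sort("-date")  # ``base`` itself is unchanged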
""" s = super()._clone() s._response_class = self._response_class s._knn = [knn.copy() for knn in self._knn] s._rank = self._rank.copy() s._collapse = self._collapse.copy() s._sort = self._sort[:] s._source = copy.copy(self._source) if self._source is not None else None s._highlight = self._highlight.copy() s._highlight_opts = self._highlight_opts.copy() s._suggest = self._suggest.copy() s._script_fields = self._script_fields.copy() for x in ("query", "post_filter"): getattr(s, x)._proxied = getattr(self, x)._proxied # copy top-level bucket definitions if self.aggs._params.get("aggs"): s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()} return s def response_class(self, cls: Type[Response[_R]]) -> Self: """ Override the default wrapper used for the response. """ s = self._clone() s._response_class = cls return s def update_from_dict(self, d: Dict[str, Any]) -> Self: """ Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``. """ d = d.copy() if "query" in d: self.query._proxied = Q(d.pop("query")) if "post_filter" in d: self.post_filter._proxied = Q(d.pop("post_filter")) aggs = d.pop("aggs", d.pop("aggregations", {})) if aggs: self.aggs._params = { "aggs": {name: A(value) for (name, value) in aggs.items()} } if "knn" in d: self._knn = d.pop("knn") if isinstance(self._knn, dict): self._knn = [self._knn] if "rank" in d: self._rank = d.pop("rank") if "collapse" in d: self._collapse = d.pop("collapse") if "sort" in d: self._sort = d.pop("sort") if "_source" in d: self._source = d.pop("_source") if "highlight" in d: high = d.pop("highlight").copy() self._highlight = high.pop("fields") self._highlight_opts = high if "suggest" in d: self._suggest = d.pop("suggest") if "text" in self._suggest: text = self._suggest.pop("text") for s in self._suggest.values(): s.setdefault("text", text) if "script_fields" in d: self._script_fields = d.pop("script_fields") self._extra.update(d) return self def script_fields(self, **kwargs: Any) -> Self: """ Define script fields to be calculated on hits. See https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html for more details. Example:: s = Search() s = s.script_fields(times_two="doc['field'].value * 2") s = s.script_fields( times_three={ 'script': { 'lang': 'painless', 'source': "doc['field'].value * params.n", 'params': {'n': 3} } } ) """ s = self._clone() for name in kwargs: if isinstance(kwargs[name], str): kwargs[name] = {"script": kwargs[name]} s._script_fields.update(kwargs) return s def knn( self, field: Union[str, "InstrumentedField"], k: int, num_candidates: int, query_vector: Optional[List[float]] = None, query_vector_builder: Optional[Dict[str, Any]] = None, boost: Optional[float] = None, filter: Optional[Query] = None, similarity: Optional[float] = None, inner_hits: Optional[Dict[str, Any]] = None, ) -> Self: """ Add a k-nearest neighbor (kNN) search. 
:arg field: the vector field to search against as a string or document class attribute :arg k: number of nearest neighbors to return as top hits :arg num_candidates: number of nearest neighbor candidates to consider per shard :arg query_vector: the vector to search for :arg query_vector_builder: A dictionary indicating how to build a query vector :arg boost: A floating-point boost factor for kNN scores :arg filter: query to filter the documents that can match :arg similarity: the minimum similarity required for a document to be considered a match, as a float value :arg inner_hits: retrieve hits from nested field Example:: s = Search() s = s.knn(field='embedding', k=5, num_candidates=10, query_vector=vector, filter=Q('term', category='blog')) """ s = self._clone() s._knn.append( { "field": str(field), # str() is for InstrumentedField instances "k": k, "num_candidates": num_candidates, } ) if query_vector is None and query_vector_builder is None: raise ValueError("one of query_vector and query_vector_builder is required") if query_vector is not None and query_vector_builder is not None: raise ValueError( "only one of query_vector and query_vector_builder must be given" ) if query_vector is not None: s._knn[-1]["query_vector"] = cast(Any, query_vector) if query_vector_builder is not None: s._knn[-1]["query_vector_builder"] = query_vector_builder if boost is not None: s._knn[-1]["boost"] = boost if filter is not None: if isinstance(filter, Query): s._knn[-1]["filter"] = filter.to_dict() else: s._knn[-1]["filter"] = filter if similarity is not None: s._knn[-1]["similarity"] = similarity if inner_hits is not None: s._knn[-1]["inner_hits"] = inner_hits return s def rank(self, rrf: Optional[Union[bool, Dict[str, Any]]] = None) -> Self: """ Defines a method for combining and ranking result sets from a combination of searches. Requires a minimum of 2 result sets. :arg rrf: Set to ``True`` or an options dictionary to set the rank method to reciprocal rank fusion (RRF). Example:: s = Search() s = s.query('match', content='search text') s = s.knn(field='embedding', k=5, num_candidates=10, query_vector=vector) s = s.rank(rrf=True) Note: This option is in technical preview and may change in the future. The syntax will likely change before GA. """ s = self._clone() s._rank = {} if rrf is not None and rrf is not False: s._rank["rrf"] = {} if rrf is True else rrf return s def source( self, fields: Optional[ Union[ bool, str, "InstrumentedField", List[Union[str, "InstrumentedField"]], Dict[str, List[Union[str, "InstrumentedField"]]], ] ] = None, **kwargs: Any, ) -> Self: """ Selectively control how the _source field is returned. :arg fields: field name, wildcard string, list of field names or wildcards, or dictionary of includes and excludes :arg kwargs: ``includes`` or ``excludes`` arguments, when ``fields`` is ``None``. When no arguments are given, the entire document will be returned for each hit. If ``fields`` is a string or list of strings, the field names or field wildcards given will be included. If ``fields`` is a dictionary with keys of 'includes' and/or 'excludes' the fields will be either included or excluded appropriately. Calling this multiple times with the same named parameter will override the previous values with the new ones.
Example:: s = Search() s = s.source(includes=['obj1.*'], excludes=["*.description"]) s = Search() s = s.source(includes=['obj1.*']).source(excludes=["*.description"]) """ s = self._clone() if fields and kwargs: raise ValueError("You cannot specify fields and kwargs at the same time.") @overload def ensure_strings(fields: str) -> str: ... @overload def ensure_strings(fields: "InstrumentedField") -> str: ... @overload def ensure_strings( fields: List[Union[str, "InstrumentedField"]], ) -> List[str]: ... @overload def ensure_strings( fields: Dict[str, List[Union[str, "InstrumentedField"]]], ) -> Dict[str, List[str]]: ... def ensure_strings( fields: Union[ str, "InstrumentedField", List[Union[str, "InstrumentedField"]], Dict[str, List[Union[str, "InstrumentedField"]]], ], ) -> Union[str, List[str], Dict[str, List[str]]]: if isinstance(fields, dict): return {k: ensure_strings(v) for k, v in fields.items()} elif not isinstance(fields, (str, InstrumentedField)): # we assume that if `fields` is not any of [dict, str, # InstrumentedField] then it is an iterable of strings or # InstrumentedFields, so we convert them to a plain list of # strings return [str(f) for f in fields] else: return str(fields) if fields is not None: s._source = fields if isinstance(fields, bool) else ensure_strings(fields) # type: ignore[assignment] return s if kwargs and not isinstance(s._source, dict): s._source = {} if isinstance(s._source, dict): for key, value in kwargs.items(): if value is None: try: del s._source[key] except KeyError: pass else: s._source[key] = ensure_strings(value) return s def sort( self, *keys: Union[str, "InstrumentedField", Dict[str, Dict[str, str]]] ) -> Self: """ Add sorting information to the search request. If called without arguments it will remove all sort requirements. Otherwise it will replace them. Acceptable arguments are:: 'some.field' '-some.other.field' {'different.field': {'any': 'dict'}} so for example:: s = Search().sort( 'category', '-title', {"price" : {"order" : "asc", "mode" : "avg"}} ) will sort by ``category``, ``title`` (in descending order) and ``price`` in ascending order using the ``avg`` mode. The API returns a copy of the Search object and can thus be chained. """ s = self._clone() s._sort = [] for k in keys: if not isinstance(k, dict): sort_field = str(k) if sort_field.startswith("-"): if sort_field[1:] == "_score": raise IllegalOperation("Sorting by `-_score` is not allowed.") s._sort.append({sort_field[1:]: {"order": "desc"}}) else: s._sort.append(sort_field) else: s._sort.append(k) return s def collapse( self, field: Optional[Union[str, "InstrumentedField"]] = None, inner_hits: Optional[Dict[str, Any]] = None, max_concurrent_group_searches: Optional[int] = None, ) -> Self: """ Add collapsing information to the search request. If called without providing ``field``, it will remove all collapse requirements, otherwise it will replace them with the provided arguments. The API returns a copy of the Search object and can thus be chained. """ s = self._clone() s._collapse = {} if field is None: return s s._collapse["field"] = str(field) if inner_hits: s._collapse["inner_hits"] = inner_hits if max_concurrent_group_searches: s._collapse["max_concurrent_group_searches"] = max_concurrent_group_searches return s def highlight_options(self, **kwargs: Any) -> Self: """ Update the global highlighting options used for this request.
For example:: s = Search() s = s.highlight_options(order='score') """ s = self._clone() s._highlight_opts.update(kwargs) return s def highlight( self, *fields: Union[str, "InstrumentedField"], **kwargs: Any ) -> Self: """ Request highlighting of some fields. All keyword arguments passed in will be used as parameters for all the fields in the ``fields`` parameter. Example:: Search().highlight('title', 'body', fragment_size=50) will produce the equivalent of:: { "highlight": { "fields": { "body": {"fragment_size": 50}, "title": {"fragment_size": 50} } } } If you want to have different options for different fields you can call ``highlight`` twice:: Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100) which will produce:: { "highlight": { "fields": { "body": {"fragment_size": 100}, "title": {"fragment_size": 50} } } } """ s = self._clone() for f in fields: s._highlight[str(f)] = kwargs return s def suggest( self, name: str, text: Optional[str] = None, regex: Optional[str] = None, **kwargs: Any, ) -> Self: """ Add a suggestions request to the search. :arg name: name of the suggestion :arg text: text to suggest on All keyword arguments will be added to the suggestions body. For example:: s = Search() s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'}) # regex query for Completion Suggester s = Search() s = s.suggest('suggestion-1', regex='py[thon|py]', completion={'field': 'body'}) """ if text is None and regex is None: raise ValueError('You have to pass "text" or "regex" argument.') if text and regex: raise ValueError('You can only pass either "text" or "regex" argument.') if regex and "completion" not in kwargs: raise ValueError( '"regex" argument must be passed with "completion" keyword argument.' ) s = self._clone() if regex: s._suggest[name] = {"regex": regex} elif text: if "completion" in kwargs: s._suggest[name] = {"prefix": text} else: s._suggest[name] = {"text": text} s._suggest[name].update(kwargs) return s def search_after(self) -> Self: """ Return a ``Search`` instance that retrieves the next page of results. This method provides an easy way to paginate a long list of results using the ``search_after`` option. For example:: page_size = 20 s = Search()[:page_size].sort("date") while True: # get a page of results r = await s.execute() # do something with this page of results # exit the loop if we reached the end if len(r.hits) < page_size: break # get a search object with the next page of results s = s.search_after() Note that the ``search_after`` option requires the search to have an explicit ``sort`` order. """ if not hasattr(self, "_response"): raise ValueError("A search must be executed before using search_after") return cast(Self, self._response.search_after()) def to_dict(self, count: bool = False, **kwargs: Any) -> Dict[str, Any]: """ Serialize the search into the dictionary that will be sent over as the request's body. :arg count: a flag to specify if we are interested in a body for count - no aggregations, no pagination bounds etc. All additional keyword arguments will be included into the dictionary. 
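        For example (illustrative), a filtered and sliced search serializes
        as::

            s = Search().query("match", title="python")[0:10]
            s.to_dict()
            # {"query": {"match": {"title": "python"}}, "from": 0, "size": 10}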
""" d = {} if self.query: d["query"] = recursive_to_dict(self.query) if self._knn: if len(self._knn) == 1: d["knn"] = self._knn[0] else: d["knn"] = self._knn if self._rank: d["rank"] = self._rank # count request doesn't care for sorting and other things if not count: if self.post_filter: d["post_filter"] = recursive_to_dict(self.post_filter.to_dict()) if self.aggs.aggs: d.update(recursive_to_dict(self.aggs.to_dict())) if self._sort: d["sort"] = self._sort if self._collapse: d["collapse"] = self._collapse d.update(recursive_to_dict(self._extra)) if self._source not in (None, {}): d["_source"] = self._source if self._highlight: d["highlight"] = {"fields": self._highlight} d["highlight"].update(self._highlight_opts) if self._suggest: d["suggest"] = self._suggest if self._script_fields: d["script_fields"] = self._script_fields d.update(recursive_to_dict(kwargs)) return d class MultiSearchBase(Request[_R]): """ Combine multiple :class:`~elasticsearch.dsl.Search` objects into a single request. """ def __init__(self, **kwargs: Any): super().__init__(**kwargs) self._searches: List[SearchBase[_R]] = [] def __getitem__(self, key: Union[int, slice]) -> Any: return self._searches[key] def __iter__(self) -> Iterator[SearchBase[_R]]: return iter(self._searches) def _clone(self) -> Self: ms = super()._clone() ms._searches = self._searches[:] return ms def add(self, search: SearchBase[_R]) -> Self: """ Adds a new :class:`~elasticsearch.dsl.Search` object to the request:: ms = MultiSearch(index='my-index') ms = ms.add(Search(doc_type=Category).filter('term', category='python')) ms = ms.add(Search(doc_type=Blog)) """ ms = self._clone() ms._searches.append(search) return ms def to_dict(self) -> List[Dict[str, Any]]: # type: ignore[override] out: List[Dict[str, Any]] = [] for s in self._searches: meta: Dict[str, Any] = {} if s._index: meta["index"] = cast(Any, s._index) meta.update(s._params) out.append(meta) out.append(s.to_dict()) return out python-elasticsearch-9.1.1/elasticsearch/dsl/serializer.py000066400000000000000000000022451506101734100237730ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any from elasticsearch.serializer import JSONSerializer from .utils import AttrList class AttrJSONSerializer(JSONSerializer): def default(self, data: Any) -> Any: if isinstance(data, AttrList): return data._l_ if hasattr(data, "to_dict"): return data.to_dict() return super().default(data) serializer = AttrJSONSerializer() python-elasticsearch-9.1.1/elasticsearch/dsl/types.py000066400000000000000000006406421506101734100227770ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. 
Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, Dict, Literal, Mapping, Sequence, Union from elastic_transport.client_utils import DEFAULT, DefaultType from . import Query from .document_base import InstrumentedField from .utils import AttrDict PipeSeparatedFlags = str class BucketCorrelationFunction(AttrDict[Any]): """ :arg count_correlation: (required) The configuration to calculate a count correlation. This function is designed for determining the correlation of a term value and a given metric. """ count_correlation: Union[ "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType ] def __init__( self, *, count_correlation: Union[ "BucketCorrelationFunctionCountCorrelation", Dict[str, Any], DefaultType ] = DEFAULT, **kwargs: Any, ): if count_correlation is not DEFAULT: kwargs["count_correlation"] = count_correlation super().__init__(kwargs) class BucketCorrelationFunctionCountCorrelation(AttrDict[Any]): """ :arg indicator: (required) The indicator with which to correlate the configured `bucket_path` values. """ indicator: Union[ "BucketCorrelationFunctionCountCorrelationIndicator", Dict[str, Any], DefaultType, ] def __init__( self, *, indicator: Union[ "BucketCorrelationFunctionCountCorrelationIndicator", Dict[str, Any], DefaultType, ] = DEFAULT, **kwargs: Any, ): if indicator is not DEFAULT: kwargs["indicator"] = indicator super().__init__(kwargs) class BucketCorrelationFunctionCountCorrelationIndicator(AttrDict[Any]): """ :arg doc_count: (required) The total number of documents that initially created the expectations. It’s required to be greater than or equal to the sum of all values in the buckets_path as this is the originating superset of data to which the term values are correlated. :arg expectations: (required) An array of numbers with which to correlate the configured `bucket_path` values. The length of this value must always equal the number of buckets returned by the `bucket_path`. :arg fractions: An array of fractions to use when averaging and calculating variance. This should be used if the pre-calculated data and the buckets_path have known gaps. The length of fractions, if provided, must equal expectations. """ doc_count: Union[int, DefaultType] expectations: Union[Sequence[float], DefaultType] fractions: Union[Sequence[float], DefaultType] def __init__( self, *, doc_count: Union[int, DefaultType] = DEFAULT, expectations: Union[Sequence[float], DefaultType] = DEFAULT, fractions: Union[Sequence[float], DefaultType] = DEFAULT, **kwargs: Any, ): if doc_count is not DEFAULT: kwargs["doc_count"] = doc_count if expectations is not DEFAULT: kwargs["expectations"] = expectations if fractions is not DEFAULT: kwargs["fractions"] = fractions super().__init__(kwargs) class ChiSquareHeuristic(AttrDict[Any]): """ :arg background_is_superset: (required) Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. 
:arg include_negatives: (required) Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. """ background_is_superset: Union[bool, DefaultType] include_negatives: Union[bool, DefaultType] def __init__( self, *, background_is_superset: Union[bool, DefaultType] = DEFAULT, include_negatives: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if background_is_superset is not DEFAULT: kwargs["background_is_superset"] = background_is_superset if include_negatives is not DEFAULT: kwargs["include_negatives"] = include_negatives super().__init__(kwargs) class ChunkingSettings(AttrDict[Any]): """ :arg strategy: (required) The chunking strategy: `sentence`, `word`, `none` or `recursive`. * If `strategy` is set to `recursive`, you must also specify: - `max_chunk_size` - either `separators` or `separator_group` Learn more about different chunking strategies in the linked documentation. Defaults to `sentence` if omitted. :arg max_chunk_size: (required) The maximum size of a chunk in words. This value cannot be higher than `300` or lower than `20` (for `sentence` strategy) or `10` (for `word` strategy). Defaults to `250` if omitted. :arg separator_group: Only applicable to the `recursive` strategy and required when using it. Sets a predefined list of separators in the saved chunking settings based on the selected text type. Values can be `markdown` or `plaintext`. Using this parameter is an alternative to manually specifying a custom `separators` list. :arg separators: Only applicable to the `recursive` strategy and required when using it. A list of strings used as possible split points when chunking text. Each string can be a plain string or a regular expression (regex) pattern. The system tries each separator in order to split the text, starting from the first item in the list. After splitting, it attempts to recombine smaller pieces into larger chunks that stay within the `max_chunk_size` limit, to reduce the total number of chunks generated. :arg overlap: The number of overlapping words for chunks. It is applicable only to a `word` chunking strategy. This value cannot be higher than half the `max_chunk_size` value. Defaults to `100` if omitted. :arg sentence_overlap: The number of overlapping sentences for chunks. It is applicable only for a `sentence` chunking strategy. It can be either `1` or `0`. Defaults to `1` if omitted.
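    Example (illustrative, mirroring the documented defaults)::

        settings = ChunkingSettings(
            strategy="sentence", max_chunk_size=250, sentence_overlap=1
        )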
""" strategy: Union[str, DefaultType] max_chunk_size: Union[int, DefaultType] separator_group: Union[str, DefaultType] separators: Union[Sequence[str], DefaultType] overlap: Union[int, DefaultType] sentence_overlap: Union[int, DefaultType] def __init__( self, *, strategy: Union[str, DefaultType] = DEFAULT, max_chunk_size: Union[int, DefaultType] = DEFAULT, separator_group: Union[str, DefaultType] = DEFAULT, separators: Union[Sequence[str], DefaultType] = DEFAULT, overlap: Union[int, DefaultType] = DEFAULT, sentence_overlap: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if strategy is not DEFAULT: kwargs["strategy"] = strategy if max_chunk_size is not DEFAULT: kwargs["max_chunk_size"] = max_chunk_size if separator_group is not DEFAULT: kwargs["separator_group"] = separator_group if separators is not DEFAULT: kwargs["separators"] = separators if overlap is not DEFAULT: kwargs["overlap"] = overlap if sentence_overlap is not DEFAULT: kwargs["sentence_overlap"] = sentence_overlap super().__init__(kwargs) class ClassificationInferenceOptions(AttrDict[Any]): """ :arg num_top_classes: Specifies the number of top class predictions to return. Defaults to 0. :arg num_top_feature_importance_values: Specifies the maximum number of feature importance values per document. :arg prediction_field_type: Specifies the type of the predicted field to write. Acceptable values are: string, number, boolean. When boolean is provided 1.0 is transformed to true and 0.0 to false. :arg results_field: The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. :arg top_classes_results_field: Specifies the field to which the top classes are written. Defaults to top_classes. """ num_top_classes: Union[int, DefaultType] num_top_feature_importance_values: Union[int, DefaultType] prediction_field_type: Union[str, DefaultType] results_field: Union[str, DefaultType] top_classes_results_field: Union[str, DefaultType] def __init__( self, *, num_top_classes: Union[int, DefaultType] = DEFAULT, num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT, prediction_field_type: Union[str, DefaultType] = DEFAULT, results_field: Union[str, DefaultType] = DEFAULT, top_classes_results_field: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if num_top_classes is not DEFAULT: kwargs["num_top_classes"] = num_top_classes if num_top_feature_importance_values is not DEFAULT: kwargs["num_top_feature_importance_values"] = ( num_top_feature_importance_values ) if prediction_field_type is not DEFAULT: kwargs["prediction_field_type"] = prediction_field_type if results_field is not DEFAULT: kwargs["results_field"] = results_field if top_classes_results_field is not DEFAULT: kwargs["top_classes_results_field"] = top_classes_results_field super().__init__(kwargs) class CommonTermsQuery(AttrDict[Any]): """ :arg query: (required) :arg analyzer: :arg cutoff_frequency: :arg high_freq_operator: :arg low_freq_operator: :arg minimum_should_match: :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ query: Union[str, DefaultType] analyzer: Union[str, DefaultType] cutoff_frequency: Union[float, DefaultType] high_freq_operator: Union[Literal["and", "or"], DefaultType] low_freq_operator: Union[Literal["and", "or"], DefaultType] minimum_should_match: Union[int, str, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, query: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, cutoff_frequency: Union[float, DefaultType] = DEFAULT, high_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, low_freq_operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, minimum_should_match: Union[int, str, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if query is not DEFAULT: kwargs["query"] = query if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if cutoff_frequency is not DEFAULT: kwargs["cutoff_frequency"] = cutoff_frequency if high_freq_operator is not DEFAULT: kwargs["high_freq_operator"] = high_freq_operator if low_freq_operator is not DEFAULT: kwargs["low_freq_operator"] = low_freq_operator if minimum_should_match is not DEFAULT: kwargs["minimum_should_match"] = minimum_should_match if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class CoordsGeoBounds(AttrDict[Any]): """ :arg top: (required) :arg bottom: (required) :arg left: (required) :arg right: (required) """ top: Union[float, DefaultType] bottom: Union[float, DefaultType] left: Union[float, DefaultType] right: Union[float, DefaultType] def __init__( self, *, top: Union[float, DefaultType] = DEFAULT, bottom: Union[float, DefaultType] = DEFAULT, left: Union[float, DefaultType] = DEFAULT, right: Union[float, DefaultType] = DEFAULT, **kwargs: Any, ): if top is not DEFAULT: kwargs["top"] = top if bottom is not DEFAULT: kwargs["bottom"] = bottom if left is not DEFAULT: kwargs["left"] = left if right is not DEFAULT: kwargs["right"] = right super().__init__(kwargs) class CustomCategorizeTextAnalyzer(AttrDict[Any]): """ :arg char_filter: :arg tokenizer: :arg filter: """ char_filter: Union[Sequence[str], DefaultType] tokenizer: Union[str, DefaultType] filter: Union[Sequence[str], DefaultType] def __init__( self, *, char_filter: Union[Sequence[str], DefaultType] = DEFAULT, tokenizer: Union[str, DefaultType] = DEFAULT, filter: Union[Sequence[str], DefaultType] = DEFAULT, **kwargs: Any, ): if char_filter is not DEFAULT: kwargs["char_filter"] = char_filter if tokenizer is not DEFAULT: kwargs["tokenizer"] = tokenizer if filter is not DEFAULT: kwargs["filter"] = filter super().__init__(kwargs) class DenseVectorIndexOptions(AttrDict[Any]): """ :arg type: (required) The type of kNN algorithm to use. :arg confidence_interval: The confidence interval to use when quantizing the vectors. Can be any value between and including `0.90` and `1.0` or exactly `0`. When the value is `0`, this indicates that dynamic quantiles should be calculated for optimized quantization. When between `0.90` and `1.0`, this value restricts the values used when calculating the quantization thresholds. For example, a value of `0.95` will only use the middle `95%` of the values when calculating the quantization thresholds (e.g. the highest and lowest `2.5%` of values will be ignored). Defaults to `1/(dims + 1)` for `int8` quantized vectors and `0` for `int4` for dynamic quantile calculation. 
Only applicable to `int8_hnsw`, `int4_hnsw`, `int8_flat`, and `int4_flat` index types. :arg ef_construction: The number of candidates to track while assembling the list of nearest neighbors for each new node. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. Defaults to `100` if omitted. :arg m: The number of neighbors each node will be connected to in the HNSW graph. Only applicable to `hnsw`, `int8_hnsw`, `bbq_hnsw`, and `int4_hnsw` index types. Defaults to `16` if omitted. :arg rescore_vector: The rescore vector options. This is only applicable to `bbq_hnsw`, `int4_hnsw`, `int8_hnsw`, `bbq_flat`, `int4_flat`, and `int8_flat` index types. """ type: Union[ Literal[ "bbq_flat", "bbq_hnsw", "flat", "hnsw", "int4_flat", "int4_hnsw", "int8_flat", "int8_hnsw", ], DefaultType, ] confidence_interval: Union[float, DefaultType] ef_construction: Union[int, DefaultType] m: Union[int, DefaultType] rescore_vector: Union[ "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType ] def __init__( self, *, type: Union[ Literal[ "bbq_flat", "bbq_hnsw", "flat", "hnsw", "int4_flat", "int4_hnsw", "int8_flat", "int8_hnsw", ], DefaultType, ] = DEFAULT, confidence_interval: Union[float, DefaultType] = DEFAULT, ef_construction: Union[int, DefaultType] = DEFAULT, m: Union[int, DefaultType] = DEFAULT, rescore_vector: Union[ "DenseVectorIndexOptionsRescoreVector", Dict[str, Any], DefaultType ] = DEFAULT, **kwargs: Any, ): if type is not DEFAULT: kwargs["type"] = type if confidence_interval is not DEFAULT: kwargs["confidence_interval"] = confidence_interval if ef_construction is not DEFAULT: kwargs["ef_construction"] = ef_construction if m is not DEFAULT: kwargs["m"] = m if rescore_vector is not DEFAULT: kwargs["rescore_vector"] = rescore_vector super().__init__(kwargs) class DenseVectorIndexOptionsRescoreVector(AttrDict[Any]): """ :arg oversample: (required) The oversampling factor to use when searching for the nearest neighbor. This is only applicable to the quantized formats: `bbq_*`, `int4_*`, and `int8_*`. When provided, `oversample * k` vectors will be gathered and then their scores will be re-computed with the original vectors. Valid values are between `1.0` and `10.0` (inclusive), or `0` exactly to disable oversampling. """ oversample: Union[float, DefaultType] def __init__( self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any ): if oversample is not DEFAULT: kwargs["oversample"] = oversample super().__init__(kwargs) class EmptyObject(AttrDict[Any]): """ For empty class assignments """ def __init__(self, **kwargs: Any): super().__init__(kwargs) class EwmaModelSettings(AttrDict[Any]): """ :arg alpha: """ alpha: Union[float, DefaultType] def __init__(self, *, alpha: Union[float, DefaultType] = DEFAULT, **kwargs: Any): if alpha is not DEFAULT: kwargs["alpha"] = alpha super().__init__(kwargs) class ExtendedBounds(AttrDict[Any]): """ :arg max: Maximum value for the bound. :arg min: Minimum value for the bound. """ max: Any min: Any def __init__(self, *, max: Any = DEFAULT, min: Any = DEFAULT, **kwargs: Any): if max is not DEFAULT: kwargs["max"] = max if min is not DEFAULT: kwargs["min"] = min super().__init__(kwargs) class FieldAndFormat(AttrDict[Any]): """ A reference to a field with formatting instructions on how to return the value :arg field: (required) A wildcard pattern. The request returns values for field names matching this pattern. :arg format: The format in which the values are returned.
:arg include_unmapped: """ field: Union[str, InstrumentedField, DefaultType] format: Union[str, DefaultType] include_unmapped: Union[bool, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, format: Union[str, DefaultType] = DEFAULT, include_unmapped: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if format is not DEFAULT: kwargs["format"] = format if include_unmapped is not DEFAULT: kwargs["include_unmapped"] = include_unmapped super().__init__(kwargs) class FieldCollapse(AttrDict[Any]): """ :arg field: (required) The field to collapse the result set on :arg inner_hits: The number of inner hits and their sort order :arg max_concurrent_group_searches: The number of concurrent requests allowed to retrieve the inner_hits per group :arg collapse: """ field: Union[str, InstrumentedField, DefaultType] inner_hits: Union[ "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType ] max_concurrent_group_searches: Union[int, DefaultType] collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, inner_hits: Union[ "InnerHits", Sequence["InnerHits"], Sequence[Dict[str, Any]], DefaultType ] = DEFAULT, max_concurrent_group_searches: Union[int, DefaultType] = DEFAULT, collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if inner_hits is not DEFAULT: kwargs["inner_hits"] = inner_hits if max_concurrent_group_searches is not DEFAULT: kwargs["max_concurrent_group_searches"] = max_concurrent_group_searches if collapse is not DEFAULT: kwargs["collapse"] = collapse super().__init__(kwargs) class FieldLookup(AttrDict[Any]): """ :arg id: (required) `id` of the document. :arg index: Index from which to retrieve the document. :arg path: Name of the field. :arg routing: Custom routing value. 
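    Example (illustrative; the index name and document id are placeholders)::

        lookup = FieldLookup(id="shape-1", index="shapes", path="geometry")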
""" id: Union[str, DefaultType] index: Union[str, DefaultType] path: Union[str, InstrumentedField, DefaultType] routing: Union[str, DefaultType] def __init__( self, *, id: Union[str, DefaultType] = DEFAULT, index: Union[str, DefaultType] = DEFAULT, path: Union[str, InstrumentedField, DefaultType] = DEFAULT, routing: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if id is not DEFAULT: kwargs["id"] = id if index is not DEFAULT: kwargs["index"] = index if path is not DEFAULT: kwargs["path"] = str(path) if routing is not DEFAULT: kwargs["routing"] = routing super().__init__(kwargs) class FieldSort(AttrDict[Any]): """ :arg missing: :arg mode: :arg nested: :arg order: :arg unmapped_type: :arg numeric_type: :arg format: """ missing: Union[str, int, float, bool, DefaultType] mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] nested: Union["NestedSortValue", Dict[str, Any], DefaultType] order: Union[Literal["asc", "desc"], DefaultType] unmapped_type: Union[ Literal[ "none", "geo_point", "geo_shape", "ip", "binary", "keyword", "text", "search_as_you_type", "date", "date_nanos", "boolean", "completion", "nested", "object", "passthrough", "version", "murmur3", "token_count", "percolator", "integer", "long", "short", "byte", "float", "half_float", "scaled_float", "double", "integer_range", "float_range", "long_range", "double_range", "date_range", "ip_range", "alias", "join", "rank_feature", "rank_features", "flattened", "shape", "histogram", "constant_keyword", "counted_keyword", "aggregate_metric_double", "dense_vector", "semantic_text", "sparse_vector", "match_only_text", "icu_collation_keyword", ], DefaultType, ] numeric_type: Union[Literal["long", "double", "date", "date_nanos"], DefaultType] format: Union[str, DefaultType] def __init__( self, *, missing: Union[str, int, float, bool, DefaultType] = DEFAULT, mode: Union[ Literal["min", "max", "sum", "avg", "median"], DefaultType ] = DEFAULT, nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, unmapped_type: Union[ Literal[ "none", "geo_point", "geo_shape", "ip", "binary", "keyword", "text", "search_as_you_type", "date", "date_nanos", "boolean", "completion", "nested", "object", "passthrough", "version", "murmur3", "token_count", "percolator", "integer", "long", "short", "byte", "float", "half_float", "scaled_float", "double", "integer_range", "float_range", "long_range", "double_range", "date_range", "ip_range", "alias", "join", "rank_feature", "rank_features", "flattened", "shape", "histogram", "constant_keyword", "counted_keyword", "aggregate_metric_double", "dense_vector", "semantic_text", "sparse_vector", "match_only_text", "icu_collation_keyword", ], DefaultType, ] = DEFAULT, numeric_type: Union[ Literal["long", "double", "date", "date_nanos"], DefaultType ] = DEFAULT, format: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if missing is not DEFAULT: kwargs["missing"] = missing if mode is not DEFAULT: kwargs["mode"] = mode if nested is not DEFAULT: kwargs["nested"] = nested if order is not DEFAULT: kwargs["order"] = order if unmapped_type is not DEFAULT: kwargs["unmapped_type"] = unmapped_type if numeric_type is not DEFAULT: kwargs["numeric_type"] = numeric_type if format is not DEFAULT: kwargs["format"] = format super().__init__(kwargs) class FielddataFrequencyFilter(AttrDict[Any]): """ :arg max: (required) :arg min: (required) :arg min_segment_size: (required) """ max: Union[float, DefaultType] min: Union[float, DefaultType] 
min_segment_size: Union[int, DefaultType] def __init__( self, *, max: Union[float, DefaultType] = DEFAULT, min: Union[float, DefaultType] = DEFAULT, min_segment_size: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if max is not DEFAULT: kwargs["max"] = max if min is not DEFAULT: kwargs["min"] = min if min_segment_size is not DEFAULT: kwargs["min_segment_size"] = min_segment_size super().__init__(kwargs) class FrequentItemSetsField(AttrDict[Any]): """ :arg field: (required) :arg exclude: Values to exclude. Can be regular expression strings or arrays of strings of exact terms. :arg include: Values to include. Can be regular expression strings or arrays of strings of exact terms. """ field: Union[str, InstrumentedField, DefaultType] exclude: Union[str, Sequence[str], DefaultType] include: Union[str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, exclude: Union[str, Sequence[str], DefaultType] = DEFAULT, include: Union[ str, Sequence[str], "TermsPartition", Dict[str, Any], DefaultType ] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if exclude is not DEFAULT: kwargs["exclude"] = exclude if include is not DEFAULT: kwargs["include"] = include super().__init__(kwargs) class FuzzyQuery(AttrDict[Any]): """ :arg value: (required) Term you wish to find in the provided field. :arg max_expansions: Maximum number of variations created. Defaults to `50` if omitted. :arg prefix_length: Number of beginning characters left unchanged when creating expansions. :arg rewrite: Method used to rewrite the query. Defaults to `constant_score` if omitted. :arg transpositions: Indicates whether edits include transpositions of two adjacent characters (for example `ab` to `ba`). Defaults to `True` if omitted. :arg fuzziness: Maximum edit distance allowed for matching. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted.
:arg _name: """ value: Union[str, float, bool, DefaultType] max_expansions: Union[int, DefaultType] prefix_length: Union[int, DefaultType] rewrite: Union[str, DefaultType] transpositions: Union[bool, DefaultType] fuzziness: Union[str, int, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, value: Union[str, float, bool, DefaultType] = DEFAULT, max_expansions: Union[int, DefaultType] = DEFAULT, prefix_length: Union[int, DefaultType] = DEFAULT, rewrite: Union[str, DefaultType] = DEFAULT, transpositions: Union[bool, DefaultType] = DEFAULT, fuzziness: Union[str, int, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if value is not DEFAULT: kwargs["value"] = value if max_expansions is not DEFAULT: kwargs["max_expansions"] = max_expansions if prefix_length is not DEFAULT: kwargs["prefix_length"] = prefix_length if rewrite is not DEFAULT: kwargs["rewrite"] = rewrite if transpositions is not DEFAULT: kwargs["transpositions"] = transpositions if fuzziness is not DEFAULT: kwargs["fuzziness"] = fuzziness if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class GeoDistanceSort(AttrDict[Any]): """ :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg mode: :arg distance_type: :arg ignore_unmapped: :arg order: :arg unit: :arg nested: """ _field: Union[str, "InstrumentedField", "DefaultType"] _value: Union[ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str], Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]], Dict[str, Any], "DefaultType", ] mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] distance_type: Union[Literal["arc", "plane"], DefaultType] ignore_unmapped: Union[bool, DefaultType] order: Union[Literal["asc", "desc"], DefaultType] unit: Union[ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType ] nested: Union["NestedSortValue", Dict[str, Any], DefaultType] def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union[ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str], Sequence[ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] ], Dict[str, Any], "DefaultType", ] = DEFAULT, *, mode: Union[ Literal["min", "max", "sum", "avg", "median"], DefaultType ] = DEFAULT, distance_type: Union[Literal["arc", "plane"], DefaultType] = DEFAULT, ignore_unmapped: Union[bool, DefaultType] = DEFAULT, order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, unit: Union[ Literal["in", "ft", "yd", "mi", "nmi", "km", "m", "cm", "mm"], DefaultType ] = DEFAULT, nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value if mode is not DEFAULT: kwargs["mode"] = mode if distance_type is not DEFAULT: kwargs["distance_type"] = distance_type if ignore_unmapped is not DEFAULT: kwargs["ignore_unmapped"] = ignore_unmapped if order is not DEFAULT: kwargs["order"] = order if unit is not DEFAULT: kwargs["unit"] = unit if nested is not DEFAULT: kwargs["nested"] = nested super().__init__(kwargs) class GeoGridQuery(AttrDict[Any]): """ :arg geotile: :arg geohash: :arg geohex: :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. 
A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ geotile: Union[str, DefaultType] geohash: Union[str, DefaultType] geohex: Union[str, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, geotile: Union[str, DefaultType] = DEFAULT, geohash: Union[str, DefaultType] = DEFAULT, geohex: Union[str, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if geotile is not DEFAULT: kwargs["geotile"] = geotile if geohash is not DEFAULT: kwargs["geohash"] = geohash if geohex is not DEFAULT: kwargs["geohex"] = geohex if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class GeoHashLocation(AttrDict[Any]): """ :arg geohash: (required) """ geohash: Union[str, DefaultType] def __init__(self, *, geohash: Union[str, DefaultType] = DEFAULT, **kwargs: Any): if geohash is not DEFAULT: kwargs["geohash"] = geohash super().__init__(kwargs) class GeoLinePoint(AttrDict[Any]): """ :arg field: (required) The name of the geo_point field. """ field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) super().__init__(kwargs) class GeoLineSort(AttrDict[Any]): """ :arg field: (required) The name of the numeric field to use as the sort key for ordering the points. """ field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) super().__init__(kwargs) class GeoPolygonPoints(AttrDict[Any]): """ :arg points: (required) """ points: Union[ Sequence[Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str]], Dict[str, Any], DefaultType, ] def __init__( self, *, points: Union[ Sequence[ Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] ], Dict[str, Any], DefaultType, ] = DEFAULT, **kwargs: Any, ): if points is not DEFAULT: kwargs["points"] = points super().__init__(kwargs) class GeoShapeFieldQuery(AttrDict[Any]): """ :arg shape: :arg indexed_shape: Query using an indexed shape retrieved from the specified document and path. :arg relation: Spatial relation operator used to search a geo field. Defaults to `intersects` if omitted. """ shape: Any indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] relation: Union[ Literal["intersects", "disjoint", "within", "contains"], DefaultType ] def __init__( self, *, shape: Any = DEFAULT, indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT, relation: Union[ Literal["intersects", "disjoint", "within", "contains"], DefaultType ] = DEFAULT, **kwargs: Any, ): if shape is not DEFAULT: kwargs["shape"] = shape if indexed_shape is not DEFAULT: kwargs["indexed_shape"] = indexed_shape if relation is not DEFAULT: kwargs["relation"] = relation super().__init__(kwargs) class GoogleNormalizedDistanceHeuristic(AttrDict[Any]): """ :arg background_is_superset: Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to.
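    Example (illustrative)::

        heuristic = GoogleNormalizedDistanceHeuristic(background_is_superset=False)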
""" background_is_superset: Union[bool, DefaultType] def __init__( self, *, background_is_superset: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if background_is_superset is not DEFAULT: kwargs["background_is_superset"] = background_is_superset super().__init__(kwargs) class HdrMethod(AttrDict[Any]): """ :arg number_of_significant_value_digits: Specifies the resolution of values for the histogram in number of significant digits. """ number_of_significant_value_digits: Union[int, DefaultType] def __init__( self, *, number_of_significant_value_digits: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if number_of_significant_value_digits is not DEFAULT: kwargs["number_of_significant_value_digits"] = ( number_of_significant_value_digits ) super().__init__(kwargs) class Highlight(AttrDict[Any]): """ :arg fields: (required) :arg encoder: :arg type: :arg boundary_chars: A string that contains each boundary character. Defaults to `.,!? \t\n` if omitted. :arg boundary_max_scan: How far to scan for boundary characters. Defaults to `20` if omitted. :arg boundary_scanner: Specifies how to break the highlighted fragments: chars, sentence, or word. Only valid for the unified and fvh highlighters. Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. :arg boundary_scanner_locale: Controls which locale is used to search for sentence and word boundaries. This parameter takes a form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. Defaults to `Locale.ROOT` if omitted. :arg force_source: :arg fragmenter: Specifies how text should be broken up in highlight snippets: `simple` or `span`. Only valid for the `plain` highlighter. Defaults to `span` if omitted. :arg fragment_size: The size of the highlighted fragment in characters. Defaults to `100` if omitted. :arg highlight_filter: :arg highlight_query: Highlight matches for a query other than the search query. This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. :arg max_fragment_length: :arg max_analyzed_offset: If set to a non-negative value, highlighting stops at this defined maximum limit. The rest of the text is not processed, thus not highlighted and no error is returned The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to lower value than the query setting. :arg no_match_size: The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. :arg number_of_fragments: The maximum number of fragments to return. If the number of fragments is set to `0`, no fragments are returned. Instead, the entire field contents are highlighted and returned. This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required. If `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults to `5` if omitted. :arg options: :arg order: Sorts highlighted fragments by score when set to `score`. By default, fragments will be output in the order they appear in the field (order: `none`). Setting this option to `score` will output the most relevant fragments first. Each highlighter applies its own logic to compute relevancy scores. Defaults to `none` if omitted. :arg phrase_limit: Controls the number of matching phrases in a document that are considered. 
Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. Only supported by the `fvh` highlighter. Defaults to `256` if omitted. :arg post_tags: Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. By default, highlighted text is wrapped in `<em>` and `</em>` tags. :arg pre_tags: Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. By default, highlighted text is wrapped in `<em>` and `</em>` tags. :arg require_field_match: By default, only fields that contain a query match are highlighted. Set to `false` to highlight all fields. Defaults to `True` if omitted. :arg tags_schema: Set to `styled` to use the built-in tag schema. """ fields: Union[ Mapping[Union[str, InstrumentedField], "HighlightField"], Sequence[Mapping[Union[str, InstrumentedField], "HighlightField"]], Dict[str, Any], DefaultType, ] encoder: Union[Literal["default", "html"], DefaultType] type: Union[Literal["plain", "fvh", "unified"], DefaultType] boundary_chars: Union[str, DefaultType] boundary_max_scan: Union[int, DefaultType] boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType] boundary_scanner_locale: Union[str, DefaultType] force_source: Union[bool, DefaultType] fragmenter: Union[Literal["simple", "span"], DefaultType] fragment_size: Union[int, DefaultType] highlight_filter: Union[bool, DefaultType] highlight_query: Union[Query, DefaultType] max_fragment_length: Union[int, DefaultType] max_analyzed_offset: Union[int, DefaultType] no_match_size: Union[int, DefaultType] number_of_fragments: Union[int, DefaultType] options: Union[Mapping[str, Any], DefaultType] order: Union[Literal["score"], DefaultType] phrase_limit: Union[int, DefaultType] post_tags: Union[Sequence[str], DefaultType] pre_tags: Union[Sequence[str], DefaultType] require_field_match: Union[bool, DefaultType] tags_schema: Union[Literal["styled"], DefaultType] def __init__( self, *, fields: Union[ Mapping[Union[str, InstrumentedField], "HighlightField"], Sequence[Mapping[Union[str, InstrumentedField], "HighlightField"]], Dict[str, Any], DefaultType, ] = DEFAULT, encoder: Union[Literal["default", "html"], DefaultType] = DEFAULT, type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT, boundary_chars: Union[str, DefaultType] = DEFAULT, boundary_max_scan: Union[int, DefaultType] = DEFAULT, boundary_scanner: Union[ Literal["chars", "sentence", "word"], DefaultType ] = DEFAULT, boundary_scanner_locale: Union[str, DefaultType] = DEFAULT, force_source: Union[bool, DefaultType] = DEFAULT, fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT, fragment_size: Union[int, DefaultType] = DEFAULT, highlight_filter: Union[bool, DefaultType] = DEFAULT, highlight_query: Union[Query, DefaultType] = DEFAULT, max_fragment_length: Union[int, DefaultType] = DEFAULT, max_analyzed_offset: Union[int, DefaultType] = DEFAULT, no_match_size: Union[int, DefaultType] = DEFAULT, number_of_fragments: Union[int, DefaultType] = DEFAULT, options: Union[Mapping[str, Any], DefaultType] = DEFAULT, order: Union[Literal["score"], DefaultType] = DEFAULT, phrase_limit: Union[int, DefaultType] = DEFAULT, post_tags: Union[Sequence[str], DefaultType] = DEFAULT, pre_tags: Union[Sequence[str], DefaultType] = DEFAULT, require_field_match: Union[bool, DefaultType] = DEFAULT, tags_schema: Union[Literal["styled"],
DefaultType] = DEFAULT, **kwargs: Any, ): if fields is not DEFAULT: kwargs["fields"] = str(fields) if encoder is not DEFAULT: kwargs["encoder"] = encoder if type is not DEFAULT: kwargs["type"] = type if boundary_chars is not DEFAULT: kwargs["boundary_chars"] = boundary_chars if boundary_max_scan is not DEFAULT: kwargs["boundary_max_scan"] = boundary_max_scan if boundary_scanner is not DEFAULT: kwargs["boundary_scanner"] = boundary_scanner if boundary_scanner_locale is not DEFAULT: kwargs["boundary_scanner_locale"] = boundary_scanner_locale if force_source is not DEFAULT: kwargs["force_source"] = force_source if fragmenter is not DEFAULT: kwargs["fragmenter"] = fragmenter if fragment_size is not DEFAULT: kwargs["fragment_size"] = fragment_size if highlight_filter is not DEFAULT: kwargs["highlight_filter"] = highlight_filter if highlight_query is not DEFAULT: kwargs["highlight_query"] = highlight_query if max_fragment_length is not DEFAULT: kwargs["max_fragment_length"] = max_fragment_length if max_analyzed_offset is not DEFAULT: kwargs["max_analyzed_offset"] = max_analyzed_offset if no_match_size is not DEFAULT: kwargs["no_match_size"] = no_match_size if number_of_fragments is not DEFAULT: kwargs["number_of_fragments"] = number_of_fragments if options is not DEFAULT: kwargs["options"] = options if order is not DEFAULT: kwargs["order"] = order if phrase_limit is not DEFAULT: kwargs["phrase_limit"] = phrase_limit if post_tags is not DEFAULT: kwargs["post_tags"] = post_tags if pre_tags is not DEFAULT: kwargs["pre_tags"] = pre_tags if require_field_match is not DEFAULT: kwargs["require_field_match"] = require_field_match if tags_schema is not DEFAULT: kwargs["tags_schema"] = tags_schema super().__init__(kwargs) class HighlightField(AttrDict[Any]): """ :arg fragment_offset: :arg matched_fields: :arg type: :arg boundary_chars: A string that contains each boundary character. Defaults to `.,!? \t\n` if omitted. :arg boundary_max_scan: How far to scan for boundary characters. Defaults to `20` if omitted. :arg boundary_scanner: Specifies how to break the highlighted fragments: chars, sentence, or word. Only valid for the unified and fvh highlighters. Defaults to `sentence` for the `unified` highlighter. Defaults to `chars` for the `fvh` highlighter. :arg boundary_scanner_locale: Controls which locale is used to search for sentence and word boundaries. This parameter takes the form of a language tag, for example: `"en-US"`, `"fr-FR"`, `"ja-JP"`. Defaults to `Locale.ROOT` if omitted. :arg force_source: :arg fragmenter: Specifies how text should be broken up in highlight snippets: `simple` or `span`. Only valid for the `plain` highlighter. Defaults to `span` if omitted. :arg fragment_size: The size of the highlighted fragment in characters. Defaults to `100` if omitted. :arg highlight_filter: :arg highlight_query: Highlight matches for a query other than the search query. This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. :arg max_fragment_length: :arg max_analyzed_offset: If set to a non-negative value, highlighting stops at this defined maximum limit. The rest of the text is not processed, thus not highlighted and no error is returned. The `max_analyzed_offset` query setting does not override the `index.highlight.max_analyzed_offset` setting, which prevails when it’s set to a lower value than the query setting.
:arg no_match_size: The amount of text you want to return from the beginning of the field if there are no matching fragments to highlight. :arg number_of_fragments: The maximum number of fragments to return. If the number of fragments is set to `0`, no fragments are returned. Instead, the entire field contents are highlighted and returned. This can be handy when you need to highlight short texts such as a title or address, but fragmentation is not required. If `number_of_fragments` is `0`, `fragment_size` is ignored. Defaults to `5` if omitted. :arg options: :arg order: Sorts highlighted fragments by score when set to `score`. By default, fragments will be output in the order they appear in the field (order: `none`). Setting this option to `score` will output the most relevant fragments first. Each highlighter applies its own logic to compute relevancy scores. Defaults to `none` if omitted. :arg phrase_limit: Controls the number of matching phrases in a document that are considered. Prevents the `fvh` highlighter from analyzing too many phrases and consuming too much memory. When using `matched_fields`, `phrase_limit` phrases per matched field are considered. Raising the limit increases query time and consumes more memory. Only supported by the `fvh` highlighter. Defaults to `256` if omitted. :arg post_tags: Use in conjunction with `pre_tags` to define the HTML tags to use for the highlighted text. By default, highlighted text is wrapped in `<em>` and `</em>` tags. :arg pre_tags: Use in conjunction with `post_tags` to define the HTML tags to use for the highlighted text. By default, highlighted text is wrapped in `<em>` and `</em>` tags. :arg require_field_match: By default, only fields that contain a query match are highlighted. Set to `false` to highlight all fields. Defaults to `True` if omitted. :arg tags_schema: Set to `styled` to use the built-in tag schema.
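For example, a minimal sketch of per-field highlighting through the `Search.highlight()` helper from this package (the index and field names are illustrative assumptions, not part of this module):

    from elasticsearch.dsl import Search

    s = (
        Search(index="articles")
        .query("match", content="kimchy")
        # each keyword argument becomes a per-field highlight option
        .highlight("content", fragment_size=150, number_of_fragments=3)
    )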
""" fragment_offset: Union[int, DefaultType] matched_fields: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] type: Union[Literal["plain", "fvh", "unified"], DefaultType] boundary_chars: Union[str, DefaultType] boundary_max_scan: Union[int, DefaultType] boundary_scanner: Union[Literal["chars", "sentence", "word"], DefaultType] boundary_scanner_locale: Union[str, DefaultType] force_source: Union[bool, DefaultType] fragmenter: Union[Literal["simple", "span"], DefaultType] fragment_size: Union[int, DefaultType] highlight_filter: Union[bool, DefaultType] highlight_query: Union[Query, DefaultType] max_fragment_length: Union[int, DefaultType] max_analyzed_offset: Union[int, DefaultType] no_match_size: Union[int, DefaultType] number_of_fragments: Union[int, DefaultType] options: Union[Mapping[str, Any], DefaultType] order: Union[Literal["score"], DefaultType] phrase_limit: Union[int, DefaultType] post_tags: Union[Sequence[str], DefaultType] pre_tags: Union[Sequence[str], DefaultType] require_field_match: Union[bool, DefaultType] tags_schema: Union[Literal["styled"], DefaultType] def __init__( self, *, fragment_offset: Union[int, DefaultType] = DEFAULT, matched_fields: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] = DEFAULT, type: Union[Literal["plain", "fvh", "unified"], DefaultType] = DEFAULT, boundary_chars: Union[str, DefaultType] = DEFAULT, boundary_max_scan: Union[int, DefaultType] = DEFAULT, boundary_scanner: Union[ Literal["chars", "sentence", "word"], DefaultType ] = DEFAULT, boundary_scanner_locale: Union[str, DefaultType] = DEFAULT, force_source: Union[bool, DefaultType] = DEFAULT, fragmenter: Union[Literal["simple", "span"], DefaultType] = DEFAULT, fragment_size: Union[int, DefaultType] = DEFAULT, highlight_filter: Union[bool, DefaultType] = DEFAULT, highlight_query: Union[Query, DefaultType] = DEFAULT, max_fragment_length: Union[int, DefaultType] = DEFAULT, max_analyzed_offset: Union[int, DefaultType] = DEFAULT, no_match_size: Union[int, DefaultType] = DEFAULT, number_of_fragments: Union[int, DefaultType] = DEFAULT, options: Union[Mapping[str, Any], DefaultType] = DEFAULT, order: Union[Literal["score"], DefaultType] = DEFAULT, phrase_limit: Union[int, DefaultType] = DEFAULT, post_tags: Union[Sequence[str], DefaultType] = DEFAULT, pre_tags: Union[Sequence[str], DefaultType] = DEFAULT, require_field_match: Union[bool, DefaultType] = DEFAULT, tags_schema: Union[Literal["styled"], DefaultType] = DEFAULT, **kwargs: Any, ): if fragment_offset is not DEFAULT: kwargs["fragment_offset"] = fragment_offset if matched_fields is not DEFAULT: kwargs["matched_fields"] = str(matched_fields) if type is not DEFAULT: kwargs["type"] = type if boundary_chars is not DEFAULT: kwargs["boundary_chars"] = boundary_chars if boundary_max_scan is not DEFAULT: kwargs["boundary_max_scan"] = boundary_max_scan if boundary_scanner is not DEFAULT: kwargs["boundary_scanner"] = boundary_scanner if boundary_scanner_locale is not DEFAULT: kwargs["boundary_scanner_locale"] = boundary_scanner_locale if force_source is not DEFAULT: kwargs["force_source"] = force_source if fragmenter is not DEFAULT: kwargs["fragmenter"] = fragmenter if fragment_size is not DEFAULT: kwargs["fragment_size"] = fragment_size if highlight_filter is not DEFAULT: kwargs["highlight_filter"] = highlight_filter if highlight_query is not DEFAULT: kwargs["highlight_query"] = highlight_query if max_fragment_length is not DEFAULT: kwargs["max_fragment_length"] = 
max_fragment_length if max_analyzed_offset is not DEFAULT: kwargs["max_analyzed_offset"] = max_analyzed_offset if no_match_size is not DEFAULT: kwargs["no_match_size"] = no_match_size if number_of_fragments is not DEFAULT: kwargs["number_of_fragments"] = number_of_fragments if options is not DEFAULT: kwargs["options"] = options if order is not DEFAULT: kwargs["order"] = order if phrase_limit is not DEFAULT: kwargs["phrase_limit"] = phrase_limit if post_tags is not DEFAULT: kwargs["post_tags"] = post_tags if pre_tags is not DEFAULT: kwargs["pre_tags"] = pre_tags if require_field_match is not DEFAULT: kwargs["require_field_match"] = require_field_match if tags_schema is not DEFAULT: kwargs["tags_schema"] = tags_schema super().__init__(kwargs) class HoltLinearModelSettings(AttrDict[Any]): """ :arg alpha: :arg beta: """ alpha: Union[float, DefaultType] beta: Union[float, DefaultType] def __init__( self, *, alpha: Union[float, DefaultType] = DEFAULT, beta: Union[float, DefaultType] = DEFAULT, **kwargs: Any, ): if alpha is not DEFAULT: kwargs["alpha"] = alpha if beta is not DEFAULT: kwargs["beta"] = beta super().__init__(kwargs) class HoltWintersModelSettings(AttrDict[Any]): """ :arg alpha: :arg beta: :arg gamma: :arg pad: :arg period: :arg type: """ alpha: Union[float, DefaultType] beta: Union[float, DefaultType] gamma: Union[float, DefaultType] pad: Union[bool, DefaultType] period: Union[int, DefaultType] type: Union[Literal["add", "mult"], DefaultType] def __init__( self, *, alpha: Union[float, DefaultType] = DEFAULT, beta: Union[float, DefaultType] = DEFAULT, gamma: Union[float, DefaultType] = DEFAULT, pad: Union[bool, DefaultType] = DEFAULT, period: Union[int, DefaultType] = DEFAULT, type: Union[Literal["add", "mult"], DefaultType] = DEFAULT, **kwargs: Any, ): if alpha is not DEFAULT: kwargs["alpha"] = alpha if beta is not DEFAULT: kwargs["beta"] = beta if gamma is not DEFAULT: kwargs["gamma"] = gamma if pad is not DEFAULT: kwargs["pad"] = pad if period is not DEFAULT: kwargs["period"] = period if type is not DEFAULT: kwargs["type"] = type super().__init__(kwargs) class InferenceConfigContainer(AttrDict[Any]): """ :arg regression: Regression configuration for inference. :arg classification: Classification configuration for inference. """ regression: Union["RegressionInferenceOptions", Dict[str, Any], DefaultType] classification: Union["ClassificationInferenceOptions", Dict[str, Any], DefaultType] def __init__( self, *, regression: Union[ "RegressionInferenceOptions", Dict[str, Any], DefaultType ] = DEFAULT, classification: Union[ "ClassificationInferenceOptions", Dict[str, Any], DefaultType ] = DEFAULT, **kwargs: Any, ): if regression is not DEFAULT: kwargs["regression"] = regression if classification is not DEFAULT: kwargs["classification"] = classification super().__init__(kwargs) class InnerHits(AttrDict[Any]): """ :arg name: The name for the particular inner hit definition in the response. Useful when a search request contains multiple inner hits. :arg size: The maximum number of hits to return per `inner_hits`. Defaults to `3` if omitted. :arg from: Inner hit starting document offset. :arg collapse: :arg docvalue_fields: :arg explain: :arg highlight: :arg ignore_unmapped: :arg script_fields: :arg seq_no_primary_term: :arg fields: :arg sort: How the inner hits should be sorted per `inner_hits`. By default, inner hits are sorted by score. 
:arg _source: :arg stored_fields: :arg track_scores: :arg version: """ name: Union[str, DefaultType] size: Union[int, DefaultType] from_: Union[int, DefaultType] collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] docvalue_fields: Union[ Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType ] explain: Union[bool, DefaultType] highlight: Union["Highlight", Dict[str, Any], DefaultType] ignore_unmapped: Union[bool, DefaultType] script_fields: Union[ Mapping[Union[str, InstrumentedField], "ScriptField"], Dict[str, Any], DefaultType, ] seq_no_primary_term: Union[bool, DefaultType] fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] sort: Union[ Union[Union[str, InstrumentedField], "SortOptions"], Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], Dict[str, Any], DefaultType, ] _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] stored_fields: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] track_scores: Union[bool, DefaultType] version: Union[bool, DefaultType] def __init__( self, *, name: Union[str, DefaultType] = DEFAULT, size: Union[int, DefaultType] = DEFAULT, from_: Union[int, DefaultType] = DEFAULT, collapse: Union["FieldCollapse", Dict[str, Any], DefaultType] = DEFAULT, docvalue_fields: Union[ Sequence["FieldAndFormat"], Sequence[Dict[str, Any]], DefaultType ] = DEFAULT, explain: Union[bool, DefaultType] = DEFAULT, highlight: Union["Highlight", Dict[str, Any], DefaultType] = DEFAULT, ignore_unmapped: Union[bool, DefaultType] = DEFAULT, script_fields: Union[ Mapping[Union[str, InstrumentedField], "ScriptField"], Dict[str, Any], DefaultType, ] = DEFAULT, seq_no_primary_term: Union[bool, DefaultType] = DEFAULT, fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT, sort: Union[ Union[Union[str, InstrumentedField], "SortOptions"], Sequence[Union[Union[str, InstrumentedField], "SortOptions"]], Dict[str, Any], DefaultType, ] = DEFAULT, _source: Union[bool, "SourceFilter", Dict[str, Any], DefaultType] = DEFAULT, stored_fields: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] = DEFAULT, track_scores: Union[bool, DefaultType] = DEFAULT, version: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if name is not DEFAULT: kwargs["name"] = name if size is not DEFAULT: kwargs["size"] = size if from_ is not DEFAULT: kwargs["from_"] = from_ if collapse is not DEFAULT: kwargs["collapse"] = collapse if docvalue_fields is not DEFAULT: kwargs["docvalue_fields"] = docvalue_fields if explain is not DEFAULT: kwargs["explain"] = explain if highlight is not DEFAULT: kwargs["highlight"] = highlight if ignore_unmapped is not DEFAULT: kwargs["ignore_unmapped"] = ignore_unmapped if script_fields is not DEFAULT: kwargs["script_fields"] = str(script_fields) if seq_no_primary_term is not DEFAULT: kwargs["seq_no_primary_term"] = seq_no_primary_term if fields is not DEFAULT: kwargs["fields"] = str(fields) if sort is not DEFAULT: kwargs["sort"] = str(sort) if _source is not DEFAULT: kwargs["_source"] = _source if stored_fields is not DEFAULT: kwargs["stored_fields"] = str(stored_fields) if track_scores is not DEFAULT: kwargs["track_scores"] = track_scores if version is not DEFAULT: kwargs["version"] = version super().__init__(kwargs) class IntervalsAllOf(AttrDict[Any]): """ :arg intervals: (required) An array of rules to combine. All rules must produce a match in a document for the overall source to match. 
:arg max_gaps: Maximum number of positions between the matching terms. Intervals produced by the rules further apart than this are not considered matches. Defaults to `-1` if omitted. :arg ordered: If `true`, intervals produced by the rules should appear in the order in which they are specified. :arg filter: Rule used to filter returned intervals. """ intervals: Union[ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType ] max_gaps: Union[int, DefaultType] ordered: Union[bool, DefaultType] filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] def __init__( self, *, intervals: Union[ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType ] = DEFAULT, max_gaps: Union[int, DefaultType] = DEFAULT, ordered: Union[bool, DefaultType] = DEFAULT, filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if intervals is not DEFAULT: kwargs["intervals"] = intervals if max_gaps is not DEFAULT: kwargs["max_gaps"] = max_gaps if ordered is not DEFAULT: kwargs["ordered"] = ordered if filter is not DEFAULT: kwargs["filter"] = filter super().__init__(kwargs) class IntervalsAnyOf(AttrDict[Any]): """ :arg intervals: (required) An array of rules to match. :arg filter: Rule used to filter returned intervals. """ intervals: Union[ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType ] filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] def __init__( self, *, intervals: Union[ Sequence["IntervalsContainer"], Sequence[Dict[str, Any]], DefaultType ] = DEFAULT, filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if intervals is not DEFAULT: kwargs["intervals"] = intervals if filter is not DEFAULT: kwargs["filter"] = filter super().__init__(kwargs) class IntervalsContainer(AttrDict[Any]): """ :arg all_of: Returns matches that span a combination of other rules. :arg any_of: Returns intervals produced by any of its sub-rules. :arg fuzzy: Matches analyzed text. :arg match: Matches analyzed text. :arg prefix: Matches terms that start with a specified set of characters. :arg range: :arg regexp: :arg wildcard: Matches terms using a wildcard pattern. 
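For example, a minimal sketch of two `match` rules that must match in order and close together (the query strings are illustrative only):

    from elasticsearch.dsl.types import (
        IntervalsAllOf,
        IntervalsContainer,
        IntervalsMatch,
    )

    # both phrases must appear, in order, at most 10 positions apart
    rule = IntervalsAllOf(
        intervals=[
            IntervalsContainer(match=IntervalsMatch(query="my favorite food")),
            IntervalsContainer(match=IntervalsMatch(query="cold porridge")),
        ],
        max_gaps=10,
        ordered=True,
    )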
""" all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] match: Union["IntervalsMatch", Dict[str, Any], DefaultType] prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] range: Union["IntervalsRange", Dict[str, Any], DefaultType] regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] def __init__( self, *, all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT, any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT, fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT, regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT, wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if all_of is not DEFAULT: kwargs["all_of"] = all_of if any_of is not DEFAULT: kwargs["any_of"] = any_of if fuzzy is not DEFAULT: kwargs["fuzzy"] = fuzzy if match is not DEFAULT: kwargs["match"] = match if prefix is not DEFAULT: kwargs["prefix"] = prefix if range is not DEFAULT: kwargs["range"] = range if regexp is not DEFAULT: kwargs["regexp"] = regexp if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard super().__init__(kwargs) class IntervalsFilter(AttrDict[Any]): """ :arg after: Query used to return intervals that follow an interval from the `filter` rule. :arg before: Query used to return intervals that occur before an interval from the `filter` rule. :arg contained_by: Query used to return intervals contained by an interval from the `filter` rule. :arg containing: Query used to return intervals that contain an interval from the `filter` rule. :arg not_contained_by: Query used to return intervals that are **not** contained by an interval from the `filter` rule. :arg not_containing: Query used to return intervals that do **not** contain an interval from the `filter` rule. :arg not_overlapping: Query used to return intervals that do **not** overlap with an interval from the `filter` rule. :arg overlapping: Query used to return intervals that overlap with an interval from the `filter` rule. :arg script: Script used to return matching documents. This script must return a boolean value: `true` or `false`. 
""" after: Union["IntervalsContainer", Dict[str, Any], DefaultType] before: Union["IntervalsContainer", Dict[str, Any], DefaultType] contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType] containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] not_contained_by: Union["IntervalsContainer", Dict[str, Any], DefaultType] not_containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] not_overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] script: Union["Script", Dict[str, Any], DefaultType] def __init__( self, *, after: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, before: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, contained_by: Union[ "IntervalsContainer", Dict[str, Any], DefaultType ] = DEFAULT, containing: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, not_contained_by: Union[ "IntervalsContainer", Dict[str, Any], DefaultType ] = DEFAULT, not_containing: Union[ "IntervalsContainer", Dict[str, Any], DefaultType ] = DEFAULT, not_overlapping: Union[ "IntervalsContainer", Dict[str, Any], DefaultType ] = DEFAULT, overlapping: Union["IntervalsContainer", Dict[str, Any], DefaultType] = DEFAULT, script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if after is not DEFAULT: kwargs["after"] = after if before is not DEFAULT: kwargs["before"] = before if contained_by is not DEFAULT: kwargs["contained_by"] = contained_by if containing is not DEFAULT: kwargs["containing"] = containing if not_contained_by is not DEFAULT: kwargs["not_contained_by"] = not_contained_by if not_containing is not DEFAULT: kwargs["not_containing"] = not_containing if not_overlapping is not DEFAULT: kwargs["not_overlapping"] = not_overlapping if overlapping is not DEFAULT: kwargs["overlapping"] = overlapping if script is not DEFAULT: kwargs["script"] = script super().__init__(kwargs) class IntervalsFuzzy(AttrDict[Any]): """ :arg term: (required) The term to match. :arg analyzer: Analyzer used to normalize the term. :arg fuzziness: Maximum edit distance allowed for matching. Defaults to `auto` if omitted. :arg prefix_length: Number of beginning characters left unchanged when creating expansions. :arg transpositions: Indicates whether edits include transpositions of two adjacent characters (for example, `ab` to `ba`). Defaults to `True` if omitted. :arg use_field: If specified, match intervals from this field rather than the top-level field. The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. 
""" term: Union[str, DefaultType] analyzer: Union[str, DefaultType] fuzziness: Union[str, int, DefaultType] prefix_length: Union[int, DefaultType] transpositions: Union[bool, DefaultType] use_field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, term: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, fuzziness: Union[str, int, DefaultType] = DEFAULT, prefix_length: Union[int, DefaultType] = DEFAULT, transpositions: Union[bool, DefaultType] = DEFAULT, use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if term is not DEFAULT: kwargs["term"] = term if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if fuzziness is not DEFAULT: kwargs["fuzziness"] = fuzziness if prefix_length is not DEFAULT: kwargs["prefix_length"] = prefix_length if transpositions is not DEFAULT: kwargs["transpositions"] = transpositions if use_field is not DEFAULT: kwargs["use_field"] = str(use_field) super().__init__(kwargs) class IntervalsMatch(AttrDict[Any]): """ :arg query: (required) Text you wish to find in the provided field. :arg analyzer: Analyzer used to analyze terms in the query. :arg max_gaps: Maximum number of positions between the matching terms. Terms further apart than this are not considered matches. Defaults to `-1` if omitted. :arg ordered: If `true`, matching terms must appear in their specified order. :arg use_field: If specified, match intervals from this field rather than the top-level field. The `term` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. :arg filter: An optional interval filter. """ query: Union[str, DefaultType] analyzer: Union[str, DefaultType] max_gaps: Union[int, DefaultType] ordered: Union[bool, DefaultType] use_field: Union[str, InstrumentedField, DefaultType] filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] def __init__( self, *, query: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, max_gaps: Union[int, DefaultType] = DEFAULT, ordered: Union[bool, DefaultType] = DEFAULT, use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, filter: Union["IntervalsFilter", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if query is not DEFAULT: kwargs["query"] = query if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if max_gaps is not DEFAULT: kwargs["max_gaps"] = max_gaps if ordered is not DEFAULT: kwargs["ordered"] = ordered if use_field is not DEFAULT: kwargs["use_field"] = str(use_field) if filter is not DEFAULT: kwargs["filter"] = filter super().__init__(kwargs) class IntervalsPrefix(AttrDict[Any]): """ :arg prefix: (required) Beginning characters of terms you wish to find in the top-level field. :arg analyzer: Analyzer used to analyze the `prefix`. :arg use_field: If specified, match intervals from this field rather than the top-level field. The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. 
""" prefix: Union[str, DefaultType] analyzer: Union[str, DefaultType] use_field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, prefix: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if prefix is not DEFAULT: kwargs["prefix"] = prefix if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if use_field is not DEFAULT: kwargs["use_field"] = str(use_field) super().__init__(kwargs) class IntervalsQuery(AttrDict[Any]): """ :arg all_of: Returns matches that span a combination of other rules. :arg any_of: Returns intervals produced by any of its sub-rules. :arg fuzzy: Matches terms that are similar to the provided term, within an edit distance defined by `fuzziness`. :arg match: Matches analyzed text. :arg prefix: Matches terms that start with a specified set of characters. :arg range: :arg regexp: :arg wildcard: Matches terms using a wildcard pattern. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] match: Union["IntervalsMatch", Dict[str, Any], DefaultType] prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] range: Union["IntervalsRange", Dict[str, Any], DefaultType] regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, all_of: Union["IntervalsAllOf", Dict[str, Any], DefaultType] = DEFAULT, any_of: Union["IntervalsAnyOf", Dict[str, Any], DefaultType] = DEFAULT, fuzzy: Union["IntervalsFuzzy", Dict[str, Any], DefaultType] = DEFAULT, match: Union["IntervalsMatch", Dict[str, Any], DefaultType] = DEFAULT, prefix: Union["IntervalsPrefix", Dict[str, Any], DefaultType] = DEFAULT, range: Union["IntervalsRange", Dict[str, Any], DefaultType] = DEFAULT, regexp: Union["IntervalsRegexp", Dict[str, Any], DefaultType] = DEFAULT, wildcard: Union["IntervalsWildcard", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if all_of is not DEFAULT: kwargs["all_of"] = all_of if any_of is not DEFAULT: kwargs["any_of"] = any_of if fuzzy is not DEFAULT: kwargs["fuzzy"] = fuzzy if match is not DEFAULT: kwargs["match"] = match if prefix is not DEFAULT: kwargs["prefix"] = prefix if range is not DEFAULT: kwargs["range"] = range if regexp is not DEFAULT: kwargs["regexp"] = regexp if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class IntervalsRange(AttrDict[Any]): """ :arg analyzer: Analyzer used to analyze the `prefix`. :arg gte: Lower term, either gte or gt must be provided. :arg gt: Lower term, either gte or gt must be provided. :arg lte: Upper term, either lte or lt must be provided. :arg lt: Upper term, either lte or lt must be provided. :arg use_field: If specified, match intervals from this field rather than the top-level field. 
The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. """ analyzer: Union[str, DefaultType] gte: Union[str, DefaultType] gt: Union[str, DefaultType] lte: Union[str, DefaultType] lt: Union[str, DefaultType] use_field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, analyzer: Union[str, DefaultType] = DEFAULT, gte: Union[str, DefaultType] = DEFAULT, gt: Union[str, DefaultType] = DEFAULT, lte: Union[str, DefaultType] = DEFAULT, lt: Union[str, DefaultType] = DEFAULT, use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if gte is not DEFAULT: kwargs["gte"] = gte if gt is not DEFAULT: kwargs["gt"] = gt if lte is not DEFAULT: kwargs["lte"] = lte if lt is not DEFAULT: kwargs["lt"] = lt if use_field is not DEFAULT: kwargs["use_field"] = str(use_field) super().__init__(kwargs) class IntervalsRegexp(AttrDict[Any]): """ :arg pattern: (required) Regex pattern. :arg analyzer: Analyzer used to analyze the `prefix`. :arg use_field: If specified, match intervals from this field rather than the top-level field. The `prefix` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. """ pattern: Union[str, DefaultType] analyzer: Union[str, DefaultType] use_field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, pattern: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if pattern is not DEFAULT: kwargs["pattern"] = pattern if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if use_field is not DEFAULT: kwargs["use_field"] = str(use_field) super().__init__(kwargs) class IntervalsWildcard(AttrDict[Any]): """ :arg pattern: (required) Wildcard pattern used to find matching terms. :arg analyzer: Analyzer used to analyze the `pattern`. Defaults to the top-level field's analyzer. :arg use_field: If specified, match intervals from this field rather than the top-level field. The `pattern` is normalized using the search analyzer from this field, unless `analyzer` is specified separately. """ pattern: Union[str, DefaultType] analyzer: Union[str, DefaultType] use_field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, pattern: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, use_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if pattern is not DEFAULT: kwargs["pattern"] = pattern if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if use_field is not DEFAULT: kwargs["use_field"] = str(use_field) super().__init__(kwargs) class IpRangeAggregationRange(AttrDict[Any]): """ :arg from: Start of the range. :arg mask: IP range defined as a CIDR mask. :arg to: End of the range. 
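For example, a minimal sketch of three adjacent ranges (the addresses are illustrative; `from` is spelled `from_` here because `from` is a reserved word in Python):

    from elasticsearch.dsl.types import IpRangeAggregationRange

    ranges = [
        IpRangeAggregationRange(to="10.0.0.5"),
        IpRangeAggregationRange(from_="10.0.0.5", to="10.0.0.127"),
        IpRangeAggregationRange(mask="10.0.0.128/25"),
    ]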
""" from_: Union[str, None, DefaultType] mask: Union[str, DefaultType] to: Union[str, None, DefaultType] def __init__( self, *, from_: Union[str, None, DefaultType] = DEFAULT, mask: Union[str, DefaultType] = DEFAULT, to: Union[str, None, DefaultType] = DEFAULT, **kwargs: Any, ): if from_ is not DEFAULT: kwargs["from_"] = from_ if mask is not DEFAULT: kwargs["mask"] = mask if to is not DEFAULT: kwargs["to"] = to super().__init__(kwargs) class LatLonGeoLocation(AttrDict[Any]): """ :arg lat: (required) Latitude :arg lon: (required) Longitude """ lat: Union[float, DefaultType] lon: Union[float, DefaultType] def __init__( self, *, lat: Union[float, DefaultType] = DEFAULT, lon: Union[float, DefaultType] = DEFAULT, **kwargs: Any, ): if lat is not DEFAULT: kwargs["lat"] = lat if lon is not DEFAULT: kwargs["lon"] = lon super().__init__(kwargs) class LikeDocument(AttrDict[Any]): """ :arg doc: A document not present in the index. :arg fields: :arg _id: ID of a document. :arg _index: Index of a document. :arg per_field_analyzer: Overrides the default analyzer. :arg routing: :arg version: :arg version_type: Defaults to `'internal'` if omitted. """ doc: Any fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] _id: Union[str, DefaultType] _index: Union[str, DefaultType] per_field_analyzer: Union[Mapping[Union[str, InstrumentedField], str], DefaultType] routing: Union[str, DefaultType] version: Union[int, DefaultType] version_type: Union[ Literal["internal", "external", "external_gte", "force"], DefaultType ] def __init__( self, *, doc: Any = DEFAULT, fields: Union[Sequence[Union[str, InstrumentedField]], DefaultType] = DEFAULT, _id: Union[str, DefaultType] = DEFAULT, _index: Union[str, DefaultType] = DEFAULT, per_field_analyzer: Union[ Mapping[Union[str, InstrumentedField], str], DefaultType ] = DEFAULT, routing: Union[str, DefaultType] = DEFAULT, version: Union[int, DefaultType] = DEFAULT, version_type: Union[ Literal["internal", "external", "external_gte", "force"], DefaultType ] = DEFAULT, **kwargs: Any, ): if doc is not DEFAULT: kwargs["doc"] = doc if fields is not DEFAULT: kwargs["fields"] = str(fields) if _id is not DEFAULT: kwargs["_id"] = _id if _index is not DEFAULT: kwargs["_index"] = _index if per_field_analyzer is not DEFAULT: kwargs["per_field_analyzer"] = str(per_field_analyzer) if routing is not DEFAULT: kwargs["routing"] = routing if version is not DEFAULT: kwargs["version"] = version if version_type is not DEFAULT: kwargs["version_type"] = version_type super().__init__(kwargs) class MatchBoolPrefixQuery(AttrDict[Any]): """ :arg query: (required) Terms you wish to find in the provided field. The last term is used in a prefix query. :arg analyzer: Analyzer used to convert the text in the query value into tokens. :arg fuzziness: Maximum edit distance allowed for matching. Can be applied to the term subqueries constructed for all terms but the final term. :arg fuzzy_rewrite: Method used to rewrite the query. Can be applied to the term subqueries constructed for all terms but the final term. :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Can be applied to the term subqueries constructed for all terms but the final term. Defaults to `True` if omitted. :arg max_expansions: Maximum number of terms to which the query will expand. Can be applied to the term subqueries constructed for all terms but the final term. Defaults to `50` if omitted. 
:arg minimum_should_match: Minimum number of clauses that must match for a document to be returned. Applied to the constructed bool query. :arg operator: Boolean logic used to interpret text in the query value. Applied to the constructed bool query. Defaults to `'or'` if omitted. :arg prefix_length: Number of beginning characters left unchanged for fuzzy matching. Can be applied to the term subqueries constructed for all terms but the final term. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ query: Union[str, DefaultType] analyzer: Union[str, DefaultType] fuzziness: Union[str, int, DefaultType] fuzzy_rewrite: Union[str, DefaultType] fuzzy_transpositions: Union[bool, DefaultType] max_expansions: Union[int, DefaultType] minimum_should_match: Union[int, str, DefaultType] operator: Union[Literal["and", "or"], DefaultType] prefix_length: Union[int, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, query: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, fuzziness: Union[str, int, DefaultType] = DEFAULT, fuzzy_rewrite: Union[str, DefaultType] = DEFAULT, fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT, max_expansions: Union[int, DefaultType] = DEFAULT, minimum_should_match: Union[int, str, DefaultType] = DEFAULT, operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, prefix_length: Union[int, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if query is not DEFAULT: kwargs["query"] = query if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if fuzziness is not DEFAULT: kwargs["fuzziness"] = fuzziness if fuzzy_rewrite is not DEFAULT: kwargs["fuzzy_rewrite"] = fuzzy_rewrite if fuzzy_transpositions is not DEFAULT: kwargs["fuzzy_transpositions"] = fuzzy_transpositions if max_expansions is not DEFAULT: kwargs["max_expansions"] = max_expansions if minimum_should_match is not DEFAULT: kwargs["minimum_should_match"] = minimum_should_match if operator is not DEFAULT: kwargs["operator"] = operator if prefix_length is not DEFAULT: kwargs["prefix_length"] = prefix_length if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class MatchPhrasePrefixQuery(AttrDict[Any]): """ :arg query: (required) Text you wish to find in the provided field. :arg analyzer: Analyzer used to convert text in the query value into tokens. :arg max_expansions: Maximum number of terms to which the last provided term of the query value will expand. Defaults to `50` if omitted. :arg slop: Maximum number of positions allowed between matching tokens. :arg zero_terms_query: Indicates whether no documents are returned if the analyzer removes all tokens, such as when using a `stop` filter. Defaults to `none` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ query: Union[str, DefaultType] analyzer: Union[str, DefaultType] max_expansions: Union[int, DefaultType] slop: Union[int, DefaultType] zero_terms_query: Union[Literal["all", "none"], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, query: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, max_expansions: Union[int, DefaultType] = DEFAULT, slop: Union[int, DefaultType] = DEFAULT, zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if query is not DEFAULT: kwargs["query"] = query if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if max_expansions is not DEFAULT: kwargs["max_expansions"] = max_expansions if slop is not DEFAULT: kwargs["slop"] = slop if zero_terms_query is not DEFAULT: kwargs["zero_terms_query"] = zero_terms_query if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class MatchPhraseQuery(AttrDict[Any]): """ :arg query: (required) Query terms that are analyzed and turned into a phrase query. :arg analyzer: Analyzer used to convert the text in the query value into tokens. :arg slop: Maximum number of positions allowed between matching tokens. :arg zero_terms_query: Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. Defaults to `'none'` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ query: Union[str, DefaultType] analyzer: Union[str, DefaultType] slop: Union[int, DefaultType] zero_terms_query: Union[Literal["all", "none"], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, query: Union[str, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, slop: Union[int, DefaultType] = DEFAULT, zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if query is not DEFAULT: kwargs["query"] = query if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if slop is not DEFAULT: kwargs["slop"] = slop if zero_terms_query is not DEFAULT: kwargs["zero_terms_query"] = zero_terms_query if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class MatchQuery(AttrDict[Any]): """ :arg query: (required) Text, number, boolean value or date you wish to find in the provided field. :arg analyzer: Analyzer used to convert the text in the query value into tokens. :arg auto_generate_synonyms_phrase_query: If `true`, match phrase queries are automatically created for multi-term synonyms. Defaults to `True` if omitted. :arg cutoff_frequency: :arg fuzziness: Maximum edit distance allowed for matching. :arg fuzzy_rewrite: Method used to rewrite the query. :arg fuzzy_transpositions: If `true`, edits for fuzzy matching include transpositions of two adjacent characters (for example, `ab` to `ba`). Defaults to `True` if omitted. 
:arg lenient: If `true`, format-based errors, such as providing a text query value for a numeric field, are ignored. :arg max_expansions: Maximum number of terms to which the query will expand. Defaults to `50` if omitted. :arg minimum_should_match: Minimum number of clauses that must match for a document to be returned. :arg operator: Boolean logic used to interpret text in the query value. Defaults to `'or'` if omitted. :arg prefix_length: Number of beginning characters left unchanged for fuzzy matching. :arg zero_terms_query: Indicates whether no documents are returned if the `analyzer` removes all tokens, such as when using a `stop` filter. Defaults to `'none'` if omitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ query: Union[str, float, bool, DefaultType] analyzer: Union[str, DefaultType] auto_generate_synonyms_phrase_query: Union[bool, DefaultType] cutoff_frequency: Union[float, DefaultType] fuzziness: Union[str, int, DefaultType] fuzzy_rewrite: Union[str, DefaultType] fuzzy_transpositions: Union[bool, DefaultType] lenient: Union[bool, DefaultType] max_expansions: Union[int, DefaultType] minimum_should_match: Union[int, str, DefaultType] operator: Union[Literal["and", "or"], DefaultType] prefix_length: Union[int, DefaultType] zero_terms_query: Union[Literal["all", "none"], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, query: Union[str, float, bool, DefaultType] = DEFAULT, analyzer: Union[str, DefaultType] = DEFAULT, auto_generate_synonyms_phrase_query: Union[bool, DefaultType] = DEFAULT, cutoff_frequency: Union[float, DefaultType] = DEFAULT, fuzziness: Union[str, int, DefaultType] = DEFAULT, fuzzy_rewrite: Union[str, DefaultType] = DEFAULT, fuzzy_transpositions: Union[bool, DefaultType] = DEFAULT, lenient: Union[bool, DefaultType] = DEFAULT, max_expansions: Union[int, DefaultType] = DEFAULT, minimum_should_match: Union[int, str, DefaultType] = DEFAULT, operator: Union[Literal["and", "or"], DefaultType] = DEFAULT, prefix_length: Union[int, DefaultType] = DEFAULT, zero_terms_query: Union[Literal["all", "none"], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if query is not DEFAULT: kwargs["query"] = query if analyzer is not DEFAULT: kwargs["analyzer"] = analyzer if auto_generate_synonyms_phrase_query is not DEFAULT: kwargs["auto_generate_synonyms_phrase_query"] = ( auto_generate_synonyms_phrase_query ) if cutoff_frequency is not DEFAULT: kwargs["cutoff_frequency"] = cutoff_frequency if fuzziness is not DEFAULT: kwargs["fuzziness"] = fuzziness if fuzzy_rewrite is not DEFAULT: kwargs["fuzzy_rewrite"] = fuzzy_rewrite if fuzzy_transpositions is not DEFAULT: kwargs["fuzzy_transpositions"] = fuzzy_transpositions if lenient is not DEFAULT: kwargs["lenient"] = lenient if max_expansions is not DEFAULT: kwargs["max_expansions"] = max_expansions if minimum_should_match is not DEFAULT: kwargs["minimum_should_match"] = minimum_should_match if operator is not DEFAULT: kwargs["operator"] = operator if prefix_length is not DEFAULT: kwargs["prefix_length"] = prefix_length if zero_terms_query is not DEFAULT: kwargs["zero_terms_query"] = zero_terms_query if boost is not DEFAULT: kwargs["boost"] = boost 
if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class MultiTermLookup(AttrDict[Any]): """ :arg field: (required) A field from which to retrieve terms. :arg missing: The value to apply to documents that do not have a value. By default, documents without a value are ignored. """ field: Union[str, InstrumentedField, DefaultType] missing: Union[str, int, float, bool, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, missing: Union[str, int, float, bool, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if missing is not DEFAULT: kwargs["missing"] = missing super().__init__(kwargs) class MutualInformationHeuristic(AttrDict[Any]): """ :arg background_is_superset: Set to `false` if you defined a custom background filter that represents a different set of documents that you want to compare to. :arg include_negatives: Set to `false` to filter out the terms that appear less often in the subset than in documents outside the subset. """ background_is_superset: Union[bool, DefaultType] include_negatives: Union[bool, DefaultType] def __init__( self, *, background_is_superset: Union[bool, DefaultType] = DEFAULT, include_negatives: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if background_is_superset is not DEFAULT: kwargs["background_is_superset"] = background_is_superset if include_negatives is not DEFAULT: kwargs["include_negatives"] = include_negatives super().__init__(kwargs) class NestedSortValue(AttrDict[Any]): """ :arg path: (required) :arg filter: :arg max_children: :arg nested: """ path: Union[str, InstrumentedField, DefaultType] filter: Union[Query, DefaultType] max_children: Union[int, DefaultType] nested: Union["NestedSortValue", Dict[str, Any], DefaultType] def __init__( self, *, path: Union[str, InstrumentedField, DefaultType] = DEFAULT, filter: Union[Query, DefaultType] = DEFAULT, max_children: Union[int, DefaultType] = DEFAULT, nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if path is not DEFAULT: kwargs["path"] = str(path) if filter is not DEFAULT: kwargs["filter"] = filter if max_children is not DEFAULT: kwargs["max_children"] = max_children if nested is not DEFAULT: kwargs["nested"] = nested super().__init__(kwargs) class NumericFielddata(AttrDict[Any]): """ :arg format: (required) """ format: Union[Literal["array", "disabled"], DefaultType] def __init__( self, *, format: Union[Literal["array", "disabled"], DefaultType] = DEFAULT, **kwargs: Any, ): if format is not DEFAULT: kwargs["format"] = format super().__init__(kwargs) class PercentageScoreHeuristic(AttrDict[Any]): pass class PinnedDoc(AttrDict[Any]): """ :arg _id: (required) The unique document ID. :arg _index: The index that contains the document. """ _id: Union[str, DefaultType] _index: Union[str, DefaultType] def __init__( self, *, _id: Union[str, DefaultType] = DEFAULT, _index: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if _id is not DEFAULT: kwargs["_id"] = _id if _index is not DEFAULT: kwargs["_index"] = _index super().__init__(kwargs) class PrefixQuery(AttrDict[Any]): """ :arg value: (required) Beginning characters of terms you wish to find in the provided field. :arg rewrite: Method used to rewrite the query. :arg case_insensitive: Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. Default is `false`, which means the case sensitivity of matching depends on the underlying field’s mapping.
:arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ value: Union[str, DefaultType] rewrite: Union[str, DefaultType] case_insensitive: Union[bool, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, value: Union[str, DefaultType] = DEFAULT, rewrite: Union[str, DefaultType] = DEFAULT, case_insensitive: Union[bool, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if value is not DEFAULT: kwargs["value"] = value if rewrite is not DEFAULT: kwargs["rewrite"] = rewrite if case_insensitive is not DEFAULT: kwargs["case_insensitive"] = case_insensitive if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class QueryVectorBuilder(AttrDict[Any]): """ :arg text_embedding: """ text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] def __init__( self, *, text_embedding: Union["TextEmbedding", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if text_embedding is not DEFAULT: kwargs["text_embedding"] = text_embedding super().__init__(kwargs) class RankFeatureFunctionLinear(AttrDict[Any]): pass class RankFeatureFunctionLogarithm(AttrDict[Any]): """ :arg scaling_factor: (required) Configurable scaling factor. """ scaling_factor: Union[float, DefaultType] def __init__( self, *, scaling_factor: Union[float, DefaultType] = DEFAULT, **kwargs: Any ): if scaling_factor is not DEFAULT: kwargs["scaling_factor"] = scaling_factor super().__init__(kwargs) class RankFeatureFunctionSaturation(AttrDict[Any]): """ :arg pivot: Configurable pivot value so that the result will be less than 0.5. """ pivot: Union[float, DefaultType] def __init__(self, *, pivot: Union[float, DefaultType] = DEFAULT, **kwargs: Any): if pivot is not DEFAULT: kwargs["pivot"] = pivot super().__init__(kwargs) class RankFeatureFunctionSigmoid(AttrDict[Any]): """ :arg pivot: (required) Configurable pivot value so that the result will be less than 0.5. :arg exponent: (required) Configurable Exponent. """ pivot: Union[float, DefaultType] exponent: Union[float, DefaultType] def __init__( self, *, pivot: Union[float, DefaultType] = DEFAULT, exponent: Union[float, DefaultType] = DEFAULT, **kwargs: Any, ): if pivot is not DEFAULT: kwargs["pivot"] = pivot if exponent is not DEFAULT: kwargs["exponent"] = exponent super().__init__(kwargs) class RegexpQuery(AttrDict[Any]): """ :arg value: (required) Regular expression for terms you wish to find in the provided field. :arg case_insensitive: Allows case insensitive matching of the regular expression value with the indexed field values when set to `true`. When `false`, case sensitivity of matching depends on the underlying field’s mapping. :arg flags: Enables optional operators for the regular expression. :arg max_determinized_states: Maximum number of automaton states required for the query. Defaults to `10000` if omitted. :arg rewrite: Method used to rewrite the query. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. 
Defaults to `1` if omitted. :arg _name: """ value: Union[str, DefaultType] case_insensitive: Union[bool, DefaultType] flags: Union[str, DefaultType] max_determinized_states: Union[int, DefaultType] rewrite: Union[str, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, value: Union[str, DefaultType] = DEFAULT, case_insensitive: Union[bool, DefaultType] = DEFAULT, flags: Union[str, DefaultType] = DEFAULT, max_determinized_states: Union[int, DefaultType] = DEFAULT, rewrite: Union[str, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if value is not DEFAULT: kwargs["value"] = value if case_insensitive is not DEFAULT: kwargs["case_insensitive"] = case_insensitive if flags is not DEFAULT: kwargs["flags"] = flags if max_determinized_states is not DEFAULT: kwargs["max_determinized_states"] = max_determinized_states if rewrite is not DEFAULT: kwargs["rewrite"] = rewrite if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class RegressionInferenceOptions(AttrDict[Any]): """ :arg results_field: The field that is added to incoming documents to contain the inference prediction. Defaults to predicted_value. :arg num_top_feature_importance_values: Specifies the maximum number of feature importance values per document. """ results_field: Union[str, InstrumentedField, DefaultType] num_top_feature_importance_values: Union[int, DefaultType] def __init__( self, *, results_field: Union[str, InstrumentedField, DefaultType] = DEFAULT, num_top_feature_importance_values: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if results_field is not DEFAULT: kwargs["results_field"] = str(results_field) if num_top_feature_importance_values is not DEFAULT: kwargs["num_top_feature_importance_values"] = ( num_top_feature_importance_values ) super().__init__(kwargs) class RescoreVector(AttrDict[Any]): """ :arg oversample: (required) Applies the specified oversample factor to k on the approximate kNN search """ oversample: Union[float, DefaultType] def __init__( self, *, oversample: Union[float, DefaultType] = DEFAULT, **kwargs: Any ): if oversample is not DEFAULT: kwargs["oversample"] = oversample super().__init__(kwargs) class ScoreSort(AttrDict[Any]): """ :arg order: """ order: Union[Literal["asc", "desc"], DefaultType] def __init__( self, *, order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, **kwargs: Any, ): if order is not DEFAULT: kwargs["order"] = order super().__init__(kwargs) class Script(AttrDict[Any]): """ :arg source: The script source. :arg id: The `id` for a stored script. :arg params: Specifies any named parameters that are passed into the script as variables. Use parameters instead of hard-coded values to decrease compile time. :arg lang: Specifies the language the script is written in. Defaults to `painless` if omitted. 
:arg options: """ source: Union[str, Dict[str, Any], DefaultType] id: Union[str, DefaultType] params: Union[Mapping[str, Any], DefaultType] lang: Union[Literal["painless", "expression", "mustache", "java"], DefaultType] options: Union[Mapping[str, str], DefaultType] def __init__( self, *, source: Union[str, Dict[str, Any], DefaultType] = DEFAULT, id: Union[str, DefaultType] = DEFAULT, params: Union[Mapping[str, Any], DefaultType] = DEFAULT, lang: Union[ Literal["painless", "expression", "mustache", "java"], DefaultType ] = DEFAULT, options: Union[Mapping[str, str], DefaultType] = DEFAULT, **kwargs: Any, ): if source is not DEFAULT: kwargs["source"] = source if id is not DEFAULT: kwargs["id"] = id if params is not DEFAULT: kwargs["params"] = params if lang is not DEFAULT: kwargs["lang"] = lang if options is not DEFAULT: kwargs["options"] = options super().__init__(kwargs) class ScriptField(AttrDict[Any]): """ :arg script: (required) :arg ignore_failure: """ script: Union["Script", Dict[str, Any], DefaultType] ignore_failure: Union[bool, DefaultType] def __init__( self, *, script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, ignore_failure: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if script is not DEFAULT: kwargs["script"] = script if ignore_failure is not DEFAULT: kwargs["ignore_failure"] = ignore_failure super().__init__(kwargs) class ScriptSort(AttrDict[Any]): """ :arg script: (required) :arg order: :arg type: :arg mode: :arg nested: """ script: Union["Script", Dict[str, Any], DefaultType] order: Union[Literal["asc", "desc"], DefaultType] type: Union[Literal["string", "number", "version"], DefaultType] mode: Union[Literal["min", "max", "sum", "avg", "median"], DefaultType] nested: Union["NestedSortValue", Dict[str, Any], DefaultType] def __init__( self, *, script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, order: Union[Literal["asc", "desc"], DefaultType] = DEFAULT, type: Union[Literal["string", "number", "version"], DefaultType] = DEFAULT, mode: Union[ Literal["min", "max", "sum", "avg", "median"], DefaultType ] = DEFAULT, nested: Union["NestedSortValue", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if script is not DEFAULT: kwargs["script"] = script if order is not DEFAULT: kwargs["order"] = order if type is not DEFAULT: kwargs["type"] = type if mode is not DEFAULT: kwargs["mode"] = mode if nested is not DEFAULT: kwargs["nested"] = nested super().__init__(kwargs) class ScriptedHeuristic(AttrDict[Any]): """ :arg script: (required) """ script: Union["Script", Dict[str, Any], DefaultType] def __init__( self, *, script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if script is not DEFAULT: kwargs["script"] = script super().__init__(kwargs) class ShapeFieldQuery(AttrDict[Any]): """ :arg indexed_shape: Queries using a pre-indexed shape. :arg relation: Spatial relation between the query shape and the document shape. :arg shape: Queries using an inline shape definition in GeoJSON or Well Known Text (WKT) format. 
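For example, a minimal sketch using an inline envelope shape (the coordinates and relation are illustrative only):

    from elasticsearch.dsl.types import ShapeFieldQuery

    # match documents whose indexed shape lies within this envelope
    q = ShapeFieldQuery(
        shape={
            "type": "envelope",
            "coordinates": [[13.0, 53.0], [14.0, 52.0]],
        },
        relation="within",
    )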
""" indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] relation: Union[ Literal["intersects", "disjoint", "within", "contains"], DefaultType ] shape: Any def __init__( self, *, indexed_shape: Union["FieldLookup", Dict[str, Any], DefaultType] = DEFAULT, relation: Union[ Literal["intersects", "disjoint", "within", "contains"], DefaultType ] = DEFAULT, shape: Any = DEFAULT, **kwargs: Any, ): if indexed_shape is not DEFAULT: kwargs["indexed_shape"] = indexed_shape if relation is not DEFAULT: kwargs["relation"] = relation if shape is not DEFAULT: kwargs["shape"] = shape super().__init__(kwargs) class SortOptions(AttrDict[Any]): """ :arg _field: The field to use in this query. :arg _value: The query value for the field. :arg _score: :arg _doc: :arg _geo_distance: :arg _script: """ _field: Union[str, "InstrumentedField", "DefaultType"] _value: Union["FieldSort", Dict[str, Any], "DefaultType"] _score: Union["ScoreSort", Dict[str, Any], DefaultType] _doc: Union["ScoreSort", Dict[str, Any], DefaultType] _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] _script: Union["ScriptSort", Dict[str, Any], DefaultType] def __init__( self, _field: Union[str, "InstrumentedField", "DefaultType"] = DEFAULT, _value: Union["FieldSort", Dict[str, Any], "DefaultType"] = DEFAULT, *, _score: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT, _doc: Union["ScoreSort", Dict[str, Any], DefaultType] = DEFAULT, _geo_distance: Union["GeoDistanceSort", Dict[str, Any], DefaultType] = DEFAULT, _script: Union["ScriptSort", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if _field is not DEFAULT: kwargs[str(_field)] = _value if _score is not DEFAULT: kwargs["_score"] = _score if _doc is not DEFAULT: kwargs["_doc"] = _doc if _geo_distance is not DEFAULT: kwargs["_geo_distance"] = _geo_distance if _script is not DEFAULT: kwargs["_script"] = _script super().__init__(kwargs) class SourceFilter(AttrDict[Any]): """ :arg exclude_vectors: If `true`, vector fields are excluded from the returned source. This option takes precedence over `includes`: any vector field will remain excluded even if it matches an `includes` rule. :arg excludes: A list of fields to exclude from the returned source. :arg includes: A list of fields to include in the returned source. """ exclude_vectors: Union[bool, DefaultType] excludes: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] includes: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] def __init__( self, *, exclude_vectors: Union[bool, DefaultType] = DEFAULT, excludes: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] = DEFAULT, includes: Union[ Union[str, InstrumentedField], Sequence[Union[str, InstrumentedField]], DefaultType, ] = DEFAULT, **kwargs: Any, ): if exclude_vectors is not DEFAULT: kwargs["exclude_vectors"] = exclude_vectors if excludes is not DEFAULT: kwargs["excludes"] = str(excludes) if includes is not DEFAULT: kwargs["includes"] = str(includes) super().__init__(kwargs) class SpanContainingQuery(AttrDict[Any]): """ :arg big: (required) Can be any span query. Matching spans from `big` that contain matches from `little` are returned. :arg little: (required) Can be any span query. Matching spans from `big` that contain matches from `little` are returned. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. 
A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ big: Union["SpanQuery", Dict[str, Any], DefaultType] little: Union["SpanQuery", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if big is not DEFAULT: kwargs["big"] = big if little is not DEFAULT: kwargs["little"] = little if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SpanFieldMaskingQuery(AttrDict[Any]): """ :arg field: (required) :arg query: (required) :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ field: Union[str, InstrumentedField, DefaultType] query: Union["SpanQuery", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, query: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if query is not DEFAULT: kwargs["query"] = query if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SpanFirstQuery(AttrDict[Any]): """ :arg end: (required) Controls the maximum end position permitted in a match. :arg match: (required) Can be any other span type query. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ end: Union[int, DefaultType] match: Union["SpanQuery", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, end: Union[int, DefaultType] = DEFAULT, match: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if end is not DEFAULT: kwargs["end"] = end if match is not DEFAULT: kwargs["match"] = match if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SpanMultiTermQuery(AttrDict[Any]): """ :arg match: (required) Should be a multi term query (one of `wildcard`, `fuzzy`, `prefix`, `range`, or `regexp` query). :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ match: Union[Query, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, match: Union[Query, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if match is not DEFAULT: kwargs["match"] = match if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SpanNearQuery(AttrDict[Any]): """ :arg clauses: (required) Array of one or more other span type queries. :arg in_order: Controls whether matches are required to be in-order. :arg slop: Controls the maximum number of intervening unmatched positions permitted. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType] in_order: Union[bool, DefaultType] slop: Union[int, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, clauses: Union[ Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType ] = DEFAULT, in_order: Union[bool, DefaultType] = DEFAULT, slop: Union[int, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if clauses is not DEFAULT: kwargs["clauses"] = clauses if in_order is not DEFAULT: kwargs["in_order"] = in_order if slop is not DEFAULT: kwargs["slop"] = slop if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SpanNotQuery(AttrDict[Any]): """ :arg exclude: (required) Span query whose matches must not overlap those returned. :arg include: (required) Span query whose matches are filtered. :arg dist: The number of tokens from within the include span that can’t have overlap with the exclude span. Equivalent to setting both `pre` and `post`. :arg post: The number of tokens after the include span that can’t have overlap with the exclude span. :arg pre: The number of tokens before the include span that can’t have overlap with the exclude span. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
    :arg _name:
    """

    exclude: Union["SpanQuery", Dict[str, Any], DefaultType]
    include: Union["SpanQuery", Dict[str, Any], DefaultType]
    dist: Union[int, DefaultType]
    post: Union[int, DefaultType]
    pre: Union[int, DefaultType]
    boost: Union[float, DefaultType]
    _name: Union[str, DefaultType]

    def __init__(
        self,
        *,
        exclude: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
        include: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT,
        dist: Union[int, DefaultType] = DEFAULT,
        post: Union[int, DefaultType] = DEFAULT,
        pre: Union[int, DefaultType] = DEFAULT,
        boost: Union[float, DefaultType] = DEFAULT,
        _name: Union[str, DefaultType] = DEFAULT,
        **kwargs: Any,
    ):
        if exclude is not DEFAULT:
            kwargs["exclude"] = exclude
        if include is not DEFAULT:
            kwargs["include"] = include
        if dist is not DEFAULT:
            kwargs["dist"] = dist
        if post is not DEFAULT:
            kwargs["post"] = post
        if pre is not DEFAULT:
            kwargs["pre"] = pre
        if boost is not DEFAULT:
            kwargs["boost"] = boost
        if _name is not DEFAULT:
            kwargs["_name"] = _name
        super().__init__(kwargs)


class SpanOrQuery(AttrDict[Any]):
    """
    :arg clauses: (required) Array of one or more other span type
        queries.
    :arg boost: Floating point number used to decrease or increase the
        relevance scores of the query. Boost values are relative to the
        default value of 1.0. A boost value between 0 and 1.0 decreases
        the relevance score. A value greater than 1.0 increases the
        relevance score. Defaults to `1` if omitted.
    :arg _name:
    """

    clauses: Union[Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType]
    boost: Union[float, DefaultType]
    _name: Union[str, DefaultType]

    def __init__(
        self,
        *,
        clauses: Union[
            Sequence["SpanQuery"], Sequence[Dict[str, Any]], DefaultType
        ] = DEFAULT,
        boost: Union[float, DefaultType] = DEFAULT,
        _name: Union[str, DefaultType] = DEFAULT,
        **kwargs: Any,
    ):
        if clauses is not DEFAULT:
            kwargs["clauses"] = clauses
        if boost is not DEFAULT:
            kwargs["boost"] = boost
        if _name is not DEFAULT:
            kwargs["_name"] = _name
        super().__init__(kwargs)


class SpanQuery(AttrDict[Any]):
    """
    :arg span_containing: Accepts a list of span queries, but only
        returns those spans which also match a second span query.
    :arg span_field_masking: Allows queries like `span_near` or
        `span_or` across different fields.
    :arg span_first: Accepts another span query whose matches must
        appear within the first N positions of the field.
    :arg span_gap:
    :arg span_multi: Wraps a `term`, `range`, `prefix`, `wildcard`,
        `regexp`, or `fuzzy` query.
    :arg span_near: Accepts multiple span queries whose matches must be
        within the specified distance of each other, and possibly in
        the same order.
    :arg span_not: Wraps another span query, and excludes any documents
        which match that query.
    :arg span_or: Combines multiple span queries and returns documents
        which match any of the specified queries.
    :arg span_term: The equivalent of the `term` query but for use with
        other span queries.
    :arg span_within: The result from a single span query is returned
        as long as its span falls within the spans returned by a list
        of other span queries.
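    Example (a minimal sketch using the classic ``Q`` helper from
    ``elasticsearch.dsl``; the ``body`` field name is a hypothetical
    placeholder)::

        from elasticsearch.dsl import Q

        # match "quick" and "fox" within two positions of each other,
        # in order
        near = Q(
            "span_near",
            clauses=[
                Q("span_term", body="quick"),
                Q("span_term", body="fox"),
            ],
            slop=2,
            in_order=True,
        )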
""" span_containing: Union["SpanContainingQuery", Dict[str, Any], DefaultType] span_field_masking: Union["SpanFieldMaskingQuery", Dict[str, Any], DefaultType] span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] span_gap: Union[Mapping[Union[str, InstrumentedField], int], DefaultType] span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] span_term: Union[ Mapping[Union[str, InstrumentedField], "SpanTermQuery"], Dict[str, Any], DefaultType, ] span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] def __init__( self, *, span_containing: Union[ "SpanContainingQuery", Dict[str, Any], DefaultType ] = DEFAULT, span_field_masking: Union[ "SpanFieldMaskingQuery", Dict[str, Any], DefaultType ] = DEFAULT, span_first: Union["SpanFirstQuery", Dict[str, Any], DefaultType] = DEFAULT, span_gap: Union[ Mapping[Union[str, InstrumentedField], int], DefaultType ] = DEFAULT, span_multi: Union["SpanMultiTermQuery", Dict[str, Any], DefaultType] = DEFAULT, span_near: Union["SpanNearQuery", Dict[str, Any], DefaultType] = DEFAULT, span_not: Union["SpanNotQuery", Dict[str, Any], DefaultType] = DEFAULT, span_or: Union["SpanOrQuery", Dict[str, Any], DefaultType] = DEFAULT, span_term: Union[ Mapping[Union[str, InstrumentedField], "SpanTermQuery"], Dict[str, Any], DefaultType, ] = DEFAULT, span_within: Union["SpanWithinQuery", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if span_containing is not DEFAULT: kwargs["span_containing"] = span_containing if span_field_masking is not DEFAULT: kwargs["span_field_masking"] = span_field_masking if span_first is not DEFAULT: kwargs["span_first"] = span_first if span_gap is not DEFAULT: kwargs["span_gap"] = str(span_gap) if span_multi is not DEFAULT: kwargs["span_multi"] = span_multi if span_near is not DEFAULT: kwargs["span_near"] = span_near if span_not is not DEFAULT: kwargs["span_not"] = span_not if span_or is not DEFAULT: kwargs["span_or"] = span_or if span_term is not DEFAULT: kwargs["span_term"] = str(span_term) if span_within is not DEFAULT: kwargs["span_within"] = span_within super().__init__(kwargs) class SpanTermQuery(AttrDict[Any]): """ :arg value: (required) :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ value: Union[int, float, str, bool, None, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, value: Union[int, float, str, bool, None, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if value is not DEFAULT: kwargs["value"] = value if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SpanWithinQuery(AttrDict[Any]): """ :arg big: (required) Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. :arg little: (required) Can be any span query. Matching spans from `little` that are enclosed within `big` are returned. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. 
Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ big: Union["SpanQuery", Dict[str, Any], DefaultType] little: Union["SpanQuery", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, big: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, little: Union["SpanQuery", Dict[str, Any], DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if big is not DEFAULT: kwargs["big"] = big if little is not DEFAULT: kwargs["little"] = little if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class SparseVectorIndexOptions(AttrDict[Any]): """ :arg prune: Whether to perform pruning, omitting the non-significant tokens from the query to improve query performance. If prune is true but the pruning_config is not specified, pruning will occur but default values will be used. Default: false :arg pruning_config: Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. This is only used if prune is set to true. If prune is set to true but pruning_config is not specified, default values will be used. """ prune: Union[bool, DefaultType] pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] def __init__( self, *, prune: Union[bool, DefaultType] = DEFAULT, pruning_config: Union[ "TokenPruningConfig", Dict[str, Any], DefaultType ] = DEFAULT, **kwargs: Any, ): if prune is not DEFAULT: kwargs["prune"] = prune if pruning_config is not DEFAULT: kwargs["pruning_config"] = pruning_config super().__init__(kwargs) class SuggestContext(AttrDict[Any]): """ :arg name: (required) :arg type: (required) :arg path: :arg precision: """ name: Union[str, DefaultType] type: Union[str, DefaultType] path: Union[str, InstrumentedField, DefaultType] precision: Union[int, str, DefaultType] def __init__( self, *, name: Union[str, DefaultType] = DEFAULT, type: Union[str, DefaultType] = DEFAULT, path: Union[str, InstrumentedField, DefaultType] = DEFAULT, precision: Union[int, str, DefaultType] = DEFAULT, **kwargs: Any, ): if name is not DEFAULT: kwargs["name"] = name if type is not DEFAULT: kwargs["type"] = type if path is not DEFAULT: kwargs["path"] = str(path) if precision is not DEFAULT: kwargs["precision"] = precision super().__init__(kwargs) class TDigest(AttrDict[Any]): """ :arg compression: Limits the maximum number of nodes used by the underlying TDigest algorithm to `20 * compression`, enabling control of memory usage and approximation error. :arg execution_hint: The default implementation of TDigest is optimized for performance, scaling to millions or even billions of sample values while maintaining acceptable accuracy levels (close to 1% relative error for millions of samples in some cases). To use an implementation optimized for accuracy, set this parameter to high_accuracy instead. Defaults to `default` if omitted. 
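    Example (a minimal sketch; the ``load_time`` field is a
    hypothetical placeholder)::

        from elasticsearch.dsl import A
        from elasticsearch.dsl.types import TDigest

        # trade memory for accuracy by raising the compression factor
        percentiles = A(
            "percentiles",
            field="load_time",
            tdigest=TDigest(compression=200),
        )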
""" compression: Union[int, DefaultType] execution_hint: Union[Literal["default", "high_accuracy"], DefaultType] def __init__( self, *, compression: Union[int, DefaultType] = DEFAULT, execution_hint: Union[ Literal["default", "high_accuracy"], DefaultType ] = DEFAULT, **kwargs: Any, ): if compression is not DEFAULT: kwargs["compression"] = compression if execution_hint is not DEFAULT: kwargs["execution_hint"] = execution_hint super().__init__(kwargs) class TermQuery(AttrDict[Any]): """ :arg value: (required) Term you wish to find in the provided field. :arg case_insensitive: Allows ASCII case insensitive matching of the value with the indexed field values when set to `true`. When `false`, the case sensitivity of matching depends on the underlying field’s mapping. :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ value: Union[int, float, str, bool, None, DefaultType] case_insensitive: Union[bool, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, value: Union[int, float, str, bool, None, DefaultType] = DEFAULT, case_insensitive: Union[bool, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if value is not DEFAULT: kwargs["value"] = value if case_insensitive is not DEFAULT: kwargs["case_insensitive"] = case_insensitive if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class TermsLookup(AttrDict[Any]): """ :arg index: (required) :arg id: (required) :arg path: (required) :arg routing: """ index: Union[str, DefaultType] id: Union[str, DefaultType] path: Union[str, InstrumentedField, DefaultType] routing: Union[str, DefaultType] def __init__( self, *, index: Union[str, DefaultType] = DEFAULT, id: Union[str, DefaultType] = DEFAULT, path: Union[str, InstrumentedField, DefaultType] = DEFAULT, routing: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if index is not DEFAULT: kwargs["index"] = index if id is not DEFAULT: kwargs["id"] = id if path is not DEFAULT: kwargs["path"] = str(path) if routing is not DEFAULT: kwargs["routing"] = routing super().__init__(kwargs) class TermsPartition(AttrDict[Any]): """ :arg num_partitions: (required) The number of partitions. :arg partition: (required) The partition number for this request. """ num_partitions: Union[int, DefaultType] partition: Union[int, DefaultType] def __init__( self, *, num_partitions: Union[int, DefaultType] = DEFAULT, partition: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if num_partitions is not DEFAULT: kwargs["num_partitions"] = num_partitions if partition is not DEFAULT: kwargs["partition"] = partition super().__init__(kwargs) class TermsSetQuery(AttrDict[Any]): """ :arg terms: (required) Array of terms you wish to find in the provided field. :arg minimum_should_match: Specification describing number of matching terms required to return a document. :arg minimum_should_match_field: Numeric field containing the number of matching terms required to return a document. :arg minimum_should_match_script: Custom script containing the number of matching terms required to return a document. 
:arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ terms: Union[Sequence[Union[int, float, str, bool, None]], DefaultType] minimum_should_match: Union[int, str, DefaultType] minimum_should_match_field: Union[str, InstrumentedField, DefaultType] minimum_should_match_script: Union["Script", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, terms: Union[ Sequence[Union[int, float, str, bool, None]], DefaultType ] = DEFAULT, minimum_should_match: Union[int, str, DefaultType] = DEFAULT, minimum_should_match_field: Union[ str, InstrumentedField, DefaultType ] = DEFAULT, minimum_should_match_script: Union[ "Script", Dict[str, Any], DefaultType ] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if terms is not DEFAULT: kwargs["terms"] = terms if minimum_should_match is not DEFAULT: kwargs["minimum_should_match"] = minimum_should_match if minimum_should_match_field is not DEFAULT: kwargs["minimum_should_match_field"] = str(minimum_should_match_field) if minimum_should_match_script is not DEFAULT: kwargs["minimum_should_match_script"] = minimum_should_match_script if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class TestPopulation(AttrDict[Any]): """ :arg field: (required) The field to aggregate. :arg script: :arg filter: A filter used to define a set of records to run unpaired t-test on. """ field: Union[str, InstrumentedField, DefaultType] script: Union["Script", Dict[str, Any], DefaultType] filter: Union[Query, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, filter: Union[Query, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if script is not DEFAULT: kwargs["script"] = script if filter is not DEFAULT: kwargs["filter"] = filter super().__init__(kwargs) class TextEmbedding(AttrDict[Any]): """ :arg model_id: (required) :arg model_text: (required) """ model_id: Union[str, DefaultType] model_text: Union[str, DefaultType] def __init__( self, *, model_id: Union[str, DefaultType] = DEFAULT, model_text: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if model_id is not DEFAULT: kwargs["model_id"] = model_id if model_text is not DEFAULT: kwargs["model_text"] = model_text super().__init__(kwargs) class TextExpansionQuery(AttrDict[Any]): """ :arg model_id: (required) The text expansion NLP model to use :arg model_text: (required) The query text :arg pruning_config: Token pruning configurations :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. 
:arg _name: """ model_id: Union[str, DefaultType] model_text: Union[str, DefaultType] pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, model_id: Union[str, DefaultType] = DEFAULT, model_text: Union[str, DefaultType] = DEFAULT, pruning_config: Union[ "TokenPruningConfig", Dict[str, Any], DefaultType ] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if model_id is not DEFAULT: kwargs["model_id"] = model_id if model_text is not DEFAULT: kwargs["model_text"] = model_text if pruning_config is not DEFAULT: kwargs["pruning_config"] = pruning_config if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class TextIndexPrefixes(AttrDict[Any]): """ :arg max_chars: (required) :arg min_chars: (required) """ max_chars: Union[int, DefaultType] min_chars: Union[int, DefaultType] def __init__( self, *, max_chars: Union[int, DefaultType] = DEFAULT, min_chars: Union[int, DefaultType] = DEFAULT, **kwargs: Any, ): if max_chars is not DEFAULT: kwargs["max_chars"] = max_chars if min_chars is not DEFAULT: kwargs["min_chars"] = min_chars super().__init__(kwargs) class TokenPruningConfig(AttrDict[Any]): """ :arg tokens_freq_ratio_threshold: Tokens whose frequency is more than this threshold times the average frequency of all tokens in the specified field are considered outliers and pruned. Defaults to `5` if omitted. :arg tokens_weight_threshold: Tokens whose weight is less than this threshold are considered nonsignificant and pruned. Defaults to `0.4` if omitted. :arg only_score_pruned_tokens: Whether to only score pruned tokens, vs only scoring kept tokens. """ tokens_freq_ratio_threshold: Union[int, DefaultType] tokens_weight_threshold: Union[float, DefaultType] only_score_pruned_tokens: Union[bool, DefaultType] def __init__( self, *, tokens_freq_ratio_threshold: Union[int, DefaultType] = DEFAULT, tokens_weight_threshold: Union[float, DefaultType] = DEFAULT, only_score_pruned_tokens: Union[bool, DefaultType] = DEFAULT, **kwargs: Any, ): if tokens_freq_ratio_threshold is not DEFAULT: kwargs["tokens_freq_ratio_threshold"] = tokens_freq_ratio_threshold if tokens_weight_threshold is not DEFAULT: kwargs["tokens_weight_threshold"] = tokens_weight_threshold if only_score_pruned_tokens is not DEFAULT: kwargs["only_score_pruned_tokens"] = only_score_pruned_tokens super().__init__(kwargs) class TopLeftBottomRightGeoBounds(AttrDict[Any]): """ :arg top_left: (required) :arg bottom_right: (required) """ top_left: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] bottom_right: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] def __init__( self, *, top_left: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] = DEFAULT, bottom_right: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] = DEFAULT, **kwargs: Any, ): if top_left is not DEFAULT: kwargs["top_left"] = top_left if bottom_right is not DEFAULT: kwargs["bottom_right"] = bottom_right super().__init__(kwargs) class TopMetricsValue(AttrDict[Any]): """ :arg field: (required) A field to return as a metric. 
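    Example (a minimal sketch; the ``price`` and ``@timestamp`` field
    names are hypothetical placeholders)::

        from elasticsearch.dsl import A
        from elasticsearch.dsl.types import TopMetricsValue

        # return the price of the most recent document
        top = A(
            "top_metrics",
            metrics=TopMetricsValue(field="price"),
            sort={"@timestamp": "desc"},
        )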
""" field: Union[str, InstrumentedField, DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) super().__init__(kwargs) class TopRightBottomLeftGeoBounds(AttrDict[Any]): """ :arg top_right: (required) :arg bottom_left: (required) """ top_right: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] bottom_left: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] def __init__( self, *, top_right: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] = DEFAULT, bottom_left: Union[ "LatLonGeoLocation", "GeoHashLocation", Sequence[float], str, Dict[str, Any], DefaultType, ] = DEFAULT, **kwargs: Any, ): if top_right is not DEFAULT: kwargs["top_right"] = top_right if bottom_left is not DEFAULT: kwargs["bottom_left"] = bottom_left super().__init__(kwargs) class WeightedAverageValue(AttrDict[Any]): """ :arg field: The field from which to extract the values or weights. :arg missing: A value or weight to use if the field is missing. :arg script: """ field: Union[str, InstrumentedField, DefaultType] missing: Union[float, DefaultType] script: Union["Script", Dict[str, Any], DefaultType] def __init__( self, *, field: Union[str, InstrumentedField, DefaultType] = DEFAULT, missing: Union[float, DefaultType] = DEFAULT, script: Union["Script", Dict[str, Any], DefaultType] = DEFAULT, **kwargs: Any, ): if field is not DEFAULT: kwargs["field"] = str(field) if missing is not DEFAULT: kwargs["missing"] = missing if script is not DEFAULT: kwargs["script"] = script super().__init__(kwargs) class WeightedTokensQuery(AttrDict[Any]): """ :arg tokens: (required) The tokens representing this query :arg pruning_config: Token pruning configurations :arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ tokens: Union[Mapping[str, float], Sequence[Mapping[str, float]], DefaultType] pruning_config: Union["TokenPruningConfig", Dict[str, Any], DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, tokens: Union[ Mapping[str, float], Sequence[Mapping[str, float]], DefaultType ] = DEFAULT, pruning_config: Union[ "TokenPruningConfig", Dict[str, Any], DefaultType ] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if tokens is not DEFAULT: kwargs["tokens"] = tokens if pruning_config is not DEFAULT: kwargs["pruning_config"] = pruning_config if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class WildcardQuery(AttrDict[Any]): """ :arg case_insensitive: Allows case insensitive matching of the pattern with the indexed field values when set to true. Default is false which means the case sensitivity of matching depends on the underlying field’s mapping. :arg rewrite: Method used to rewrite the query. :arg value: Wildcard pattern for terms you wish to find in the provided field. Required, when wildcard is not set. :arg wildcard: Wildcard pattern for terms you wish to find in the provided field. Required, when value is not set. 
:arg boost: Floating point number used to decrease or increase the relevance scores of the query. Boost values are relative to the default value of 1.0. A boost value between 0 and 1.0 decreases the relevance score. A value greater than 1.0 increases the relevance score. Defaults to `1` if omitted. :arg _name: """ case_insensitive: Union[bool, DefaultType] rewrite: Union[str, DefaultType] value: Union[str, DefaultType] wildcard: Union[str, DefaultType] boost: Union[float, DefaultType] _name: Union[str, DefaultType] def __init__( self, *, case_insensitive: Union[bool, DefaultType] = DEFAULT, rewrite: Union[str, DefaultType] = DEFAULT, value: Union[str, DefaultType] = DEFAULT, wildcard: Union[str, DefaultType] = DEFAULT, boost: Union[float, DefaultType] = DEFAULT, _name: Union[str, DefaultType] = DEFAULT, **kwargs: Any, ): if case_insensitive is not DEFAULT: kwargs["case_insensitive"] = case_insensitive if rewrite is not DEFAULT: kwargs["rewrite"] = rewrite if value is not DEFAULT: kwargs["value"] = value if wildcard is not DEFAULT: kwargs["wildcard"] = wildcard if boost is not DEFAULT: kwargs["boost"] = boost if _name is not DEFAULT: kwargs["_name"] = _name super().__init__(kwargs) class WktGeoBounds(AttrDict[Any]): """ :arg wkt: (required) """ wkt: Union[str, DefaultType] def __init__(self, *, wkt: Union[str, DefaultType] = DEFAULT, **kwargs: Any): if wkt is not DEFAULT: kwargs["wkt"] = wkt super().__init__(kwargs) class AdjacencyMatrixAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["AdjacencyMatrixBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "AdjacencyMatrixBucket"]: return self.buckets # type: ignore[return-value] class AdjacencyMatrixBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: str doc_count: int class AggregationBreakdown(AttrDict[Any]): """ :arg build_aggregation: (required) :arg build_aggregation_count: (required) :arg build_leaf_collector: (required) :arg build_leaf_collector_count: (required) :arg collect: (required) :arg collect_count: (required) :arg initialize: (required) :arg initialize_count: (required) :arg reduce: (required) :arg reduce_count: (required) :arg post_collection: :arg post_collection_count: """ build_aggregation: int build_aggregation_count: int build_leaf_collector: int build_leaf_collector_count: int collect: int collect_count: int initialize: int initialize_count: int reduce: int reduce_count: int post_collection: int post_collection_count: int class AggregationProfile(AttrDict[Any]): """ :arg breakdown: (required) :arg description: (required) :arg time_in_nanos: (required) :arg type: (required) :arg debug: :arg children: """ breakdown: "AggregationBreakdown" description: str time_in_nanos: Any type: str debug: "AggregationProfileDebug" children: Sequence["AggregationProfile"] class AggregationProfileDebug(AttrDict[Any]): """ :arg segments_with_multi_valued_ords: :arg collection_strategy: :arg segments_with_single_valued_ords: :arg total_buckets: :arg built_buckets: :arg result_strategy: :arg has_filter: :arg delegate: :arg delegate_debug: :arg chars_fetched: :arg extract_count: :arg extract_ns: :arg values_fetched: :arg collect_analyzed_ns: :arg collect_analyzed_count: :arg surviving_buckets: :arg ordinals_collectors_used: :arg ordinals_collectors_overhead_too_high: :arg string_hashing_collectors_used: :arg numeric_collectors_used: :arg empty_collectors_used: :arg deferred_aggregators: :arg 
segments_with_doc_count_field: :arg segments_with_deleted_docs: :arg filters: :arg segments_counted: :arg segments_collected: :arg map_reducer: :arg brute_force_used: :arg dynamic_pruning_attempted: :arg dynamic_pruning_used: :arg skipped_due_to_no_data: """ segments_with_multi_valued_ords: int collection_strategy: str segments_with_single_valued_ords: int total_buckets: int built_buckets: int result_strategy: str has_filter: bool delegate: str delegate_debug: "AggregationProfileDebug" chars_fetched: int extract_count: int extract_ns: int values_fetched: int collect_analyzed_ns: int collect_analyzed_count: int surviving_buckets: int ordinals_collectors_used: int ordinals_collectors_overhead_too_high: int string_hashing_collectors_used: int numeric_collectors_used: int empty_collectors_used: int deferred_aggregators: Sequence[str] segments_with_doc_count_field: int segments_with_deleted_docs: int filters: Sequence["AggregationProfileDelegateDebugFilter"] segments_counted: int segments_collected: int map_reducer: str brute_force_used: int dynamic_pruning_attempted: int dynamic_pruning_used: int skipped_due_to_no_data: int class AggregationProfileDelegateDebugFilter(AttrDict[Any]): """ :arg results_from_metadata: :arg query: :arg specialized_for: :arg segments_counted_in_constant_time: """ results_from_metadata: int query: str specialized_for: str segments_counted_in_constant_time: int class ArrayPercentilesItem(AttrDict[Any]): """ :arg key: (required) :arg value: (required) :arg value_as_string: """ key: float value: Union[float, None] value_as_string: str class AutoDateHistogramAggregate(AttrDict[Any]): """ :arg interval: (required) :arg buckets: (required) the aggregation buckets as a list :arg meta: """ interval: str buckets: Sequence["DateHistogramBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]: return self.buckets # type: ignore[return-value] class AvgAggregate(AttrDict[Any]): """ :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class BoxPlotAggregate(AttrDict[Any]): """ :arg min: (required) :arg max: (required) :arg q1: (required) :arg q2: (required) :arg q3: (required) :arg lower: (required) :arg upper: (required) :arg min_as_string: :arg max_as_string: :arg q1_as_string: :arg q2_as_string: :arg q3_as_string: :arg lower_as_string: :arg upper_as_string: :arg meta: """ min: float max: float q1: float q2: float q3: float lower: float upper: float min_as_string: str max_as_string: str q1_as_string: str q2_as_string: str q3_as_string: str lower_as_string: str upper_as_string: str meta: Mapping[str, Any] class BucketMetricValueAggregate(AttrDict[Any]): """ :arg keys: (required) :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. 
:arg value_as_string: :arg meta: """ keys: Sequence[str] # type: ignore[assignment] value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class BulkIndexByScrollFailure(AttrDict[Any]): """ :arg cause: (required) :arg id: (required) :arg index: (required) :arg status: (required) """ cause: "ErrorCause" id: str index: str status: int class CardinalityAggregate(AttrDict[Any]): """ :arg value: (required) :arg meta: """ value: int meta: Mapping[str, Any] class ChildrenAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class ClusterDetails(AttrDict[Any]): """ :arg status: (required) :arg indices: (required) :arg timed_out: (required) :arg took: :arg _shards: :arg failures: """ status: Literal["running", "successful", "partial", "skipped", "failed"] indices: str timed_out: bool took: Any _shards: "ShardStatistics" failures: Sequence["ShardFailure"] class ClusterStatistics(AttrDict[Any]): """ :arg skipped: (required) :arg successful: (required) :arg total: (required) :arg running: (required) :arg partial: (required) :arg failed: (required) :arg details: """ skipped: int successful: int total: int running: int partial: int failed: int details: Mapping[str, "ClusterDetails"] class Collector(AttrDict[Any]): """ :arg name: (required) :arg reason: (required) :arg time_in_nanos: (required) :arg children: """ name: str reason: str time_in_nanos: Any children: Sequence["Collector"] class CompletionSuggest(AttrDict[Any]): """ :arg options: (required) :arg length: (required) :arg offset: (required) :arg text: (required) """ options: Sequence["CompletionSuggestOption"] length: int offset: int text: str class CompletionSuggestOption(AttrDict[Any]): """ :arg text: (required) :arg collate_match: :arg contexts: :arg fields: :arg _id: :arg _index: :arg _routing: :arg _score: :arg _source: :arg score: """ text: str collate_match: bool contexts: Mapping[ str, Sequence[ Union[ str, Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] ] ], ] fields: Mapping[str, Any] _id: str _index: str _routing: str _score: float _source: Any score: float class CompositeAggregate(AttrDict[Any]): """ :arg after_key: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ after_key: Mapping[str, Union[int, float, str, bool, None]] buckets: Sequence["CompositeBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "CompositeBucket"]: return self.buckets # type: ignore[return-value] class CompositeBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: Mapping[str, Union[int, float, str, bool, None]] doc_count: int class CumulativeCardinalityAggregate(AttrDict[Any]): """ Result of the `cumulative_cardinality` aggregation :arg value: (required) :arg value_as_string: :arg meta: """ value: int value_as_string: str meta: Mapping[str, Any] class DateHistogramAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["DateHistogramBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "DateHistogramBucket"]: return self.buckets # type: ignore[return-value] class DateHistogramBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) :arg key_as_string: """ key: Any doc_count: int key_as_string: str class DateRangeAggregate(AttrDict[Any]): """ Result of a `date_range` aggregation. 
    Same format as for a `range` aggregation: `from` and `to` in
    `buckets` are milliseconds since the Epoch, represented as a
    floating point number.

    :arg buckets: (required) the aggregation buckets as a list
    :arg meta:
    """

    buckets: Sequence["RangeBucket"]
    meta: Mapping[str, Any]

    @property
    def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]:
        return self.buckets  # type: ignore[return-value]


class DerivativeAggregate(AttrDict[Any]):
    """
    :arg value: (required) The metric value. A missing value generally
        means that there was no data to aggregate, unless specified
        otherwise.
    :arg normalized_value:
    :arg normalized_value_as_string:
    :arg value_as_string:
    :arg meta:
    """

    value: Union[float, None]
    normalized_value: float
    normalized_value_as_string: str
    value_as_string: str
    meta: Mapping[str, Any]


class DfsKnnProfile(AttrDict[Any]):
    """
    :arg query: (required)
    :arg rewrite_time: (required)
    :arg collector: (required)
    :arg vector_operations_count:
    """

    query: Sequence["KnnQueryProfileResult"]
    rewrite_time: int
    collector: Sequence["KnnCollectorResult"]
    vector_operations_count: int


class DfsProfile(AttrDict[Any]):
    """
    :arg statistics:
    :arg knn:
    """

    statistics: "DfsStatisticsProfile"
    knn: Sequence["DfsKnnProfile"]


class DfsStatisticsBreakdown(AttrDict[Any]):
    """
    :arg collection_statistics: (required)
    :arg collection_statistics_count: (required)
    :arg create_weight: (required)
    :arg create_weight_count: (required)
    :arg rewrite: (required)
    :arg rewrite_count: (required)
    :arg term_statistics: (required)
    :arg term_statistics_count: (required)
    """

    collection_statistics: int
    collection_statistics_count: int
    create_weight: int
    create_weight_count: int
    rewrite: int
    rewrite_count: int
    term_statistics: int
    term_statistics_count: int


class DfsStatisticsProfile(AttrDict[Any]):
    """
    :arg type: (required)
    :arg description: (required)
    :arg time_in_nanos: (required)
    :arg breakdown: (required)
    :arg time:
    :arg debug:
    :arg children:
    """

    type: str
    description: str
    time_in_nanos: Any
    breakdown: "DfsStatisticsBreakdown"
    time: Any
    debug: Mapping[str, Any]
    children: Sequence["DfsStatisticsProfile"]


class DoubleTermsAggregate(AttrDict[Any]):
    """
    Result of a `terms` aggregation when the field is some kind of
    decimal number like a float, double, or distance.

    :arg doc_count_error_upper_bound:
    :arg sum_other_doc_count:
    :arg buckets: (required) the aggregation buckets as a list
    :arg meta:
    """

    doc_count_error_upper_bound: int
    sum_other_doc_count: int
    buckets: Sequence["DoubleTermsBucket"]
    meta: Mapping[str, Any]

    @property
    def buckets_as_dict(self) -> Mapping[str, "DoubleTermsBucket"]:
        return self.buckets  # type: ignore[return-value]


class DoubleTermsBucket(AttrDict[Any]):
    """
    :arg key: (required)
    :arg doc_count: (required)
    :arg key_as_string:
    :arg doc_count_error_upper_bound:
    """

    key: float
    doc_count: int
    key_as_string: str
    doc_count_error_upper_bound: int


class ErrorCause(AttrDict[Any]):
    """
    Cause and details about a request failure. This class defines the
    properties common to all error types. Additional details are also
    provided that depend on the error type.

    :arg type: (required) The type of error
    :arg reason: A human-readable explanation of the error, in English.
    :arg stack_trace: The server stack trace. Present only if the
        `error_trace=true` parameter was sent with the request.
:arg caused_by: :arg root_cause: :arg suppressed: """ type: str reason: Union[str, None] stack_trace: str caused_by: "ErrorCause" root_cause: Sequence["ErrorCause"] suppressed: Sequence["ErrorCause"] class Explanation(AttrDict[Any]): """ :arg description: (required) :arg details: (required) :arg value: (required) """ description: str details: Sequence["ExplanationDetail"] value: float class ExplanationDetail(AttrDict[Any]): """ :arg description: (required) :arg value: (required) :arg details: """ description: str value: float details: Sequence["ExplanationDetail"] class ExtendedStatsAggregate(AttrDict[Any]): """ :arg sum_of_squares: (required) :arg variance: (required) :arg variance_population: (required) :arg variance_sampling: (required) :arg std_deviation: (required) :arg std_deviation_population: (required) :arg std_deviation_sampling: (required) :arg count: (required) :arg min: (required) :arg max: (required) :arg avg: (required) :arg sum: (required) :arg std_deviation_bounds: :arg sum_of_squares_as_string: :arg variance_as_string: :arg variance_population_as_string: :arg variance_sampling_as_string: :arg std_deviation_as_string: :arg std_deviation_bounds_as_string: :arg min_as_string: :arg max_as_string: :arg avg_as_string: :arg sum_as_string: :arg meta: """ sum_of_squares: Union[float, None] variance: Union[float, None] variance_population: Union[float, None] variance_sampling: Union[float, None] std_deviation: Union[float, None] std_deviation_population: Union[float, None] std_deviation_sampling: Union[float, None] count: int min: Union[float, None] max: Union[float, None] avg: Union[float, None] sum: float std_deviation_bounds: "StandardDeviationBounds" sum_of_squares_as_string: str variance_as_string: str variance_population_as_string: str variance_sampling_as_string: str std_deviation_as_string: str std_deviation_bounds_as_string: "StandardDeviationBoundsAsString" min_as_string: str max_as_string: str avg_as_string: str sum_as_string: str meta: Mapping[str, Any] class ExtendedStatsBucketAggregate(AttrDict[Any]): """ :arg sum_of_squares: (required) :arg variance: (required) :arg variance_population: (required) :arg variance_sampling: (required) :arg std_deviation: (required) :arg std_deviation_population: (required) :arg std_deviation_sampling: (required) :arg count: (required) :arg min: (required) :arg max: (required) :arg avg: (required) :arg sum: (required) :arg std_deviation_bounds: :arg sum_of_squares_as_string: :arg variance_as_string: :arg variance_population_as_string: :arg variance_sampling_as_string: :arg std_deviation_as_string: :arg std_deviation_bounds_as_string: :arg min_as_string: :arg max_as_string: :arg avg_as_string: :arg sum_as_string: :arg meta: """ sum_of_squares: Union[float, None] variance: Union[float, None] variance_population: Union[float, None] variance_sampling: Union[float, None] std_deviation: Union[float, None] std_deviation_population: Union[float, None] std_deviation_sampling: Union[float, None] count: int min: Union[float, None] max: Union[float, None] avg: Union[float, None] sum: float std_deviation_bounds: "StandardDeviationBounds" sum_of_squares_as_string: str variance_as_string: str variance_population_as_string: str variance_sampling_as_string: str std_deviation_as_string: str std_deviation_bounds_as_string: "StandardDeviationBoundsAsString" min_as_string: str max_as_string: str avg_as_string: str sum_as_string: str meta: Mapping[str, Any] class FetchProfile(AttrDict[Any]): """ :arg type: (required) :arg description: (required) :arg 
time_in_nanos: (required) :arg breakdown: (required) :arg debug: :arg children: """ type: str description: str time_in_nanos: Any breakdown: "FetchProfileBreakdown" debug: "FetchProfileDebug" children: Sequence["FetchProfile"] class FetchProfileBreakdown(AttrDict[Any]): """ :arg load_source: :arg load_source_count: :arg load_stored_fields: :arg load_stored_fields_count: :arg next_reader: :arg next_reader_count: :arg process_count: :arg process: """ load_source: int load_source_count: int load_stored_fields: int load_stored_fields_count: int next_reader: int next_reader_count: int process_count: int process: int class FetchProfileDebug(AttrDict[Any]): """ :arg stored_fields: :arg fast_path: """ stored_fields: Sequence[str] fast_path: int class FilterAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class FiltersAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["FiltersBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "FiltersBucket"]: return self.buckets # type: ignore[return-value] class FiltersBucket(AttrDict[Any]): """ :arg doc_count: (required) :arg key: """ doc_count: int key: str class FrequentItemSetsAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["FrequentItemSetsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "FrequentItemSetsBucket"]: return self.buckets # type: ignore[return-value] class FrequentItemSetsBucket(AttrDict[Any]): """ :arg key: (required) :arg support: (required) :arg doc_count: (required) """ key: Mapping[str, Sequence[str]] support: float doc_count: int class GeoBoundsAggregate(AttrDict[Any]): """ :arg bounds: :arg meta: """ bounds: Union[ "CoordsGeoBounds", "TopLeftBottomRightGeoBounds", "TopRightBottomLeftGeoBounds", "WktGeoBounds", ] meta: Mapping[str, Any] class GeoCentroidAggregate(AttrDict[Any]): """ :arg count: (required) :arg location: :arg meta: """ count: int location: Union["LatLonGeoLocation", "GeoHashLocation", Sequence[float], str] meta: Mapping[str, Any] class GeoDistanceAggregate(AttrDict[Any]): """ Result of a `geo_distance` aggregation. The unit for `from` and `to` is meters by default. :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["RangeBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]: return self.buckets # type: ignore[return-value] class GeoHashGridAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["GeoHashGridBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "GeoHashGridBucket"]: return self.buckets # type: ignore[return-value] class GeoHashGridBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: str doc_count: int class GeoHexGridAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["GeoHexGridBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "GeoHexGridBucket"]: return self.buckets # type: ignore[return-value] class GeoHexGridBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: str doc_count: int class GeoLine(AttrDict[Any]): """ A GeoJson GeoLine. 
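    For example, a returned value may look like ``{"type": "LineString",
    "coordinates": [[-71.34, 41.12], [-71.30, 41.15]]}`` (illustrative
    coordinates).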
:arg type: (required) Always `"LineString"` :arg coordinates: (required) Array of `[lon, lat]` coordinates """ type: str coordinates: Sequence[Sequence[float]] class GeoLineAggregate(AttrDict[Any]): """ :arg type: (required) :arg geometry: (required) :arg properties: (required) :arg meta: """ type: str geometry: "GeoLine" properties: Any meta: Mapping[str, Any] class GeoTileGridAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["GeoTileGridBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "GeoTileGridBucket"]: return self.buckets # type: ignore[return-value] class GeoTileGridBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: str doc_count: int class GlobalAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class HdrPercentileRanksAggregate(AttrDict[Any]): """ :arg values: (required) :arg meta: """ values: Union[ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"] ] meta: Mapping[str, Any] class HdrPercentilesAggregate(AttrDict[Any]): """ :arg values: (required) :arg meta: """ values: Union[ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"] ] meta: Mapping[str, Any] class HistogramAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["HistogramBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "HistogramBucket"]: return self.buckets # type: ignore[return-value] class HistogramBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) :arg key_as_string: """ key: float doc_count: int key_as_string: str class Hit(AttrDict[Any]): """ :arg index: (required) :arg id: :arg score: :arg explanation: :arg fields: :arg highlight: :arg inner_hits: :arg matched_queries: :arg nested: :arg ignored: :arg ignored_field_values: :arg shard: :arg node: :arg routing: :arg source: :arg rank: :arg seq_no: :arg primary_term: :arg version: :arg sort: """ index: str id: str score: Union[float, None] explanation: "Explanation" fields: Mapping[str, Any] highlight: Mapping[str, Sequence[str]] inner_hits: Mapping[str, "InnerHitsResult"] matched_queries: Union[Sequence[str], Mapping[str, float]] nested: "NestedIdentity" ignored: Sequence[str] ignored_field_values: Mapping[str, Sequence[Any]] shard: str node: str routing: str source: Any rank: int seq_no: int primary_term: int version: int sort: Sequence[Union[int, float, str, bool, None]] class HitsMetadata(AttrDict[Any]): """ :arg hits: (required) :arg total: Total hit count information, present only if `track_total_hits` wasn't `false` in the search request. 
:arg max_score: """ hits: Sequence["Hit"] total: Union["TotalHits", int] max_score: Union[float, None] class InferenceAggregate(AttrDict[Any]): """ :arg value: :arg feature_importance: :arg top_classes: :arg warning: :arg meta: """ value: Union[int, float, str, bool, None] feature_importance: Sequence["InferenceFeatureImportance"] top_classes: Sequence["InferenceTopClassEntry"] warning: str meta: Mapping[str, Any] class InferenceClassImportance(AttrDict[Any]): """ :arg class_name: (required) :arg importance: (required) """ class_name: str importance: float class InferenceFeatureImportance(AttrDict[Any]): """ :arg feature_name: (required) :arg importance: :arg classes: """ feature_name: str importance: float classes: Sequence["InferenceClassImportance"] class InferenceTopClassEntry(AttrDict[Any]): """ :arg class_name: (required) :arg class_probability: (required) :arg class_score: (required) """ class_name: Union[int, float, str, bool, None] class_probability: float class_score: float class InnerHitsResult(AttrDict[Any]): """ :arg hits: (required) """ hits: "HitsMetadata" class IpPrefixAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["IpPrefixBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "IpPrefixBucket"]: return self.buckets # type: ignore[return-value] class IpPrefixBucket(AttrDict[Any]): """ :arg is_ipv6: (required) :arg key: (required) :arg prefix_length: (required) :arg doc_count: (required) :arg netmask: """ is_ipv6: bool key: str prefix_length: int doc_count: int netmask: str class IpRangeAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["IpRangeBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "IpRangeBucket"]: return self.buckets # type: ignore[return-value] class IpRangeBucket(AttrDict[Any]): """ :arg doc_count: (required) :arg key: :arg from: :arg to: """ doc_count: int key: str from_: str to: str class KnnCollectorResult(AttrDict[Any]): """ :arg name: (required) :arg reason: (required) :arg time_in_nanos: (required) :arg time: :arg children: """ name: str reason: str time_in_nanos: Any time: Any children: Sequence["KnnCollectorResult"] class KnnQueryProfileBreakdown(AttrDict[Any]): """ :arg advance: (required) :arg advance_count: (required) :arg build_scorer: (required) :arg build_scorer_count: (required) :arg compute_max_score: (required) :arg compute_max_score_count: (required) :arg count_weight: (required) :arg count_weight_count: (required) :arg create_weight: (required) :arg create_weight_count: (required) :arg match: (required) :arg match_count: (required) :arg next_doc: (required) :arg next_doc_count: (required) :arg score: (required) :arg score_count: (required) :arg set_min_competitive_score: (required) :arg set_min_competitive_score_count: (required) :arg shallow_advance: (required) :arg shallow_advance_count: (required) """ advance: int advance_count: int build_scorer: int build_scorer_count: int compute_max_score: int compute_max_score_count: int count_weight: int count_weight_count: int create_weight: int create_weight_count: int match: int match_count: int next_doc: int next_doc_count: int score: int score_count: int set_min_competitive_score: int set_min_competitive_score_count: int shallow_advance: int shallow_advance_count: int class KnnQueryProfileResult(AttrDict[Any]): """ :arg type: (required) :arg description: (required) :arg time_in_nanos: 
(required) :arg breakdown: (required) :arg time: :arg debug: :arg children: """ type: str description: str time_in_nanos: Any breakdown: "KnnQueryProfileBreakdown" time: Any debug: Mapping[str, Any] children: Sequence["KnnQueryProfileResult"] class LongRareTermsAggregate(AttrDict[Any]): """ Result of the `rare_terms` aggregation when the field is some kind of whole number like an integer, long, or a date. :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["LongRareTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "LongRareTermsBucket"]: return self.buckets # type: ignore[return-value] class LongRareTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) :arg key_as_string: """ key: int doc_count: int key_as_string: str class LongTermsAggregate(AttrDict[Any]): """ Result of a `terms` aggregation when the field is some kind of whole number like an integer, long, or a date. :arg doc_count_error_upper_bound: :arg sum_other_doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ doc_count_error_upper_bound: int sum_other_doc_count: int buckets: Sequence["LongTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "LongTermsBucket"]: return self.buckets # type: ignore[return-value] class LongTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) :arg key_as_string: :arg doc_count_error_upper_bound: """ key: int doc_count: int key_as_string: str doc_count_error_upper_bound: int class MatrixStatsAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg fields: :arg meta: """ doc_count: int fields: Sequence["MatrixStatsFields"] meta: Mapping[str, Any] class MatrixStatsFields(AttrDict[Any]): """ :arg name: (required) :arg count: (required) :arg mean: (required) :arg variance: (required) :arg skewness: (required) :arg kurtosis: (required) :arg covariance: (required) :arg correlation: (required) """ name: str count: int mean: float variance: float skewness: float kurtosis: float covariance: Mapping[str, float] correlation: Mapping[str, float] class MaxAggregate(AttrDict[Any]): """ :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class MedianAbsoluteDeviationAggregate(AttrDict[Any]): """ :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class MinAggregate(AttrDict[Any]): """ :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise.
:arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class MissingAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class MultiTermsAggregate(AttrDict[Any]): """ :arg doc_count_error_upper_bound: :arg sum_other_doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ doc_count_error_upper_bound: int sum_other_doc_count: int buckets: Sequence["MultiTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "MultiTermsBucket"]: return self.buckets # type: ignore[return-value] class MultiTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) :arg key_as_string: :arg doc_count_error_upper_bound: """ key: Sequence[Union[int, float, str, bool, None]] doc_count: int key_as_string: str doc_count_error_upper_bound: int class NestedAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class NestedIdentity(AttrDict[Any]): """ :arg field: (required) :arg offset: (required) :arg _nested: """ field: str offset: int _nested: "NestedIdentity" class ParentAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class PercentilesBucketAggregate(AttrDict[Any]): """ :arg values: (required) :arg meta: """ values: Union[ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"] ] meta: Mapping[str, Any] class PhraseSuggest(AttrDict[Any]): """ :arg options: (required) :arg length: (required) :arg offset: (required) :arg text: (required) """ options: Sequence["PhraseSuggestOption"] length: int offset: int text: str class PhraseSuggestOption(AttrDict[Any]): """ :arg text: (required) :arg score: (required) :arg highlighted: :arg collate_match: """ text: str score: float highlighted: str collate_match: bool class Profile(AttrDict[Any]): """ :arg shards: (required) """ shards: Sequence["ShardProfile"] class QueryBreakdown(AttrDict[Any]): """ :arg advance: (required) :arg advance_count: (required) :arg build_scorer: (required) :arg build_scorer_count: (required) :arg create_weight: (required) :arg create_weight_count: (required) :arg match: (required) :arg match_count: (required) :arg shallow_advance: (required) :arg shallow_advance_count: (required) :arg next_doc: (required) :arg next_doc_count: (required) :arg score: (required) :arg score_count: (required) :arg compute_max_score: (required) :arg compute_max_score_count: (required) :arg count_weight: (required) :arg count_weight_count: (required) :arg set_min_competitive_score: (required) :arg set_min_competitive_score_count: (required) """ advance: int advance_count: int build_scorer: int build_scorer_count: int create_weight: int create_weight_count: int match: int match_count: int shallow_advance: int shallow_advance_count: int next_doc: int next_doc_count: int score: int score_count: int compute_max_score: int compute_max_score_count: int count_weight: int count_weight_count: int set_min_competitive_score: int set_min_competitive_score_count: int class QueryProfile(AttrDict[Any]): """ :arg breakdown: (required) :arg description: (required) :arg time_in_nanos: (required) :arg type: (required) :arg children: """ breakdown: "QueryBreakdown" description: str time_in_nanos: Any type: str children: Sequence["QueryProfile"] class RangeAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: 
Sequence["RangeBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "RangeBucket"]: return self.buckets # type: ignore[return-value] class RangeBucket(AttrDict[Any]): """ :arg doc_count: (required) :arg from: :arg to: :arg from_as_string: :arg to_as_string: :arg key: The bucket key. Present if the aggregation is _not_ keyed """ doc_count: int from_: float to: float from_as_string: str to_as_string: str key: str class RateAggregate(AttrDict[Any]): """ :arg value: (required) :arg value_as_string: :arg meta: """ value: float value_as_string: str meta: Mapping[str, Any] class Retries(AttrDict[Any]): """ :arg bulk: (required) The number of bulk actions retried. :arg search: (required) The number of search actions retried. """ bulk: int search: int class ReverseNestedAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class SamplerAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class ScriptedMetricAggregate(AttrDict[Any]): """ :arg value: (required) :arg meta: """ value: Any meta: Mapping[str, Any] class SearchProfile(AttrDict[Any]): """ :arg collector: (required) :arg query: (required) :arg rewrite_time: (required) """ collector: Sequence["Collector"] query: Sequence["QueryProfile"] rewrite_time: int class ShardFailure(AttrDict[Any]): """ :arg reason: (required) :arg index: :arg node: :arg shard: :arg status: :arg primary: """ reason: "ErrorCause" index: str node: str shard: int status: str primary: bool class ShardProfile(AttrDict[Any]): """ :arg aggregations: (required) :arg cluster: (required) :arg id: (required) :arg index: (required) :arg node_id: (required) :arg searches: (required) :arg shard_id: (required) :arg dfs: :arg fetch: """ aggregations: Sequence["AggregationProfile"] cluster: str id: str index: str node_id: str searches: Sequence["SearchProfile"] shard_id: int dfs: "DfsProfile" fetch: "FetchProfile" class ShardStatistics(AttrDict[Any]): """ :arg failed: (required) The number of shards the operation or search attempted to run on but failed. :arg successful: (required) The number of shards the operation or search succeeded on. :arg total: (required) The number of shards the operation or search will run on overall. 
:arg failures: :arg skipped: """ failed: int successful: int total: int failures: Sequence["ShardFailure"] skipped: int class SignificantLongTermsAggregate(AttrDict[Any]): """ :arg bg_count: :arg doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ bg_count: int doc_count: int buckets: Sequence["SignificantLongTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "SignificantLongTermsBucket"]: return self.buckets # type: ignore[return-value] class SignificantLongTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg score: (required) :arg bg_count: (required) :arg doc_count: (required) :arg key_as_string: """ key: int score: float bg_count: int doc_count: int key_as_string: str class SignificantStringTermsAggregate(AttrDict[Any]): """ :arg bg_count: :arg doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ bg_count: int doc_count: int buckets: Sequence["SignificantStringTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "SignificantStringTermsBucket"]: return self.buckets # type: ignore[return-value] class SignificantStringTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg score: (required) :arg bg_count: (required) :arg doc_count: (required) """ key: str score: float bg_count: int doc_count: int class SimpleValueAggregate(AttrDict[Any]): """ :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class StandardDeviationBounds(AttrDict[Any]): """ :arg upper: (required) :arg lower: (required) :arg upper_population: (required) :arg lower_population: (required) :arg upper_sampling: (required) :arg lower_sampling: (required) """ upper: Union[float, None] lower: Union[float, None] upper_population: Union[float, None] lower_population: Union[float, None] upper_sampling: Union[float, None] lower_sampling: Union[float, None] class StandardDeviationBoundsAsString(AttrDict[Any]): """ :arg upper: (required) :arg lower: (required) :arg upper_population: (required) :arg lower_population: (required) :arg upper_sampling: (required) :arg lower_sampling: (required) """ upper: str lower: str upper_population: str lower_population: str upper_sampling: str lower_sampling: str class StatsAggregate(AttrDict[Any]): """ Statistics aggregation result. `min`, `max` and `avg` are missing if there were no values to process (`count` is zero). :arg count: (required) :arg min: (required) :arg max: (required) :arg avg: (required) :arg sum: (required) :arg min_as_string: :arg max_as_string: :arg avg_as_string: :arg sum_as_string: :arg meta: """ count: int min: Union[float, None] max: Union[float, None] avg: Union[float, None] sum: float min_as_string: str max_as_string: str avg_as_string: str sum_as_string: str meta: Mapping[str, Any] class StatsBucketAggregate(AttrDict[Any]): """ :arg count: (required) :arg min: (required) :arg max: (required) :arg avg: (required) :arg sum: (required) :arg min_as_string: :arg max_as_string: :arg avg_as_string: :arg sum_as_string: :arg meta: """ count: int min: Union[float, None] max: Union[float, None] avg: Union[float, None] sum: float min_as_string: str max_as_string: str avg_as_string: str sum_as_string: str meta: Mapping[str, Any] class StringRareTermsAggregate(AttrDict[Any]): """ Result of the `rare_terms` aggregation when the field is a string. 
:arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["StringRareTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "StringRareTermsBucket"]: return self.buckets # type: ignore[return-value] class StringRareTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: str doc_count: int class StringStatsAggregate(AttrDict[Any]): """ :arg count: (required) :arg min_length: (required) :arg max_length: (required) :arg avg_length: (required) :arg entropy: (required) :arg distribution: :arg min_length_as_string: :arg max_length_as_string: :arg avg_length_as_string: :arg meta: """ count: int min_length: Union[int, None] max_length: Union[int, None] avg_length: Union[float, None] entropy: Union[float, None] distribution: Union[Mapping[str, float], None] min_length_as_string: str max_length_as_string: str avg_length_as_string: str meta: Mapping[str, Any] class StringTermsAggregate(AttrDict[Any]): """ Result of a `terms` aggregation when the field is a string. :arg doc_count_error_upper_bound: :arg sum_other_doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ doc_count_error_upper_bound: int sum_other_doc_count: int buckets: Sequence["StringTermsBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "StringTermsBucket"]: return self.buckets # type: ignore[return-value] class StringTermsBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) :arg doc_count_error_upper_bound: """ key: Union[int, float, str, bool, None] doc_count: int doc_count_error_upper_bound: int class SumAggregate(AttrDict[Any]): """ Sum aggregation result. `value` is always present and is zero if there were no values to process. :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. 
:arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class TDigestPercentileRanksAggregate(AttrDict[Any]): """ :arg values: (required) :arg meta: """ values: Union[ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"] ] meta: Mapping[str, Any] class TDigestPercentilesAggregate(AttrDict[Any]): """ :arg values: (required) :arg meta: """ values: Union[ Mapping[str, Union[str, float, None]], Sequence["ArrayPercentilesItem"] ] meta: Mapping[str, Any] class TTestAggregate(AttrDict[Any]): """ :arg value: (required) :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class TermSuggest(AttrDict[Any]): """ :arg options: (required) :arg length: (required) :arg offset: (required) :arg text: (required) """ options: Sequence["TermSuggestOption"] length: int offset: int text: str class TermSuggestOption(AttrDict[Any]): """ :arg text: (required) :arg score: (required) :arg freq: (required) :arg highlighted: :arg collate_match: """ text: str score: float freq: int highlighted: str collate_match: bool class TimeSeriesAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["TimeSeriesBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "TimeSeriesBucket"]: return self.buckets # type: ignore[return-value] class TimeSeriesBucket(AttrDict[Any]): """ :arg key: (required) :arg doc_count: (required) """ key: Mapping[str, Union[int, float, str, bool, None]] doc_count: int class TopHitsAggregate(AttrDict[Any]): """ :arg hits: (required) :arg meta: """ hits: "HitsMetadata" meta: Mapping[str, Any] class TopMetrics(AttrDict[Any]): """ :arg sort: (required) :arg metrics: (required) """ sort: Sequence[Union[Union[int, float, str, bool, None], None]] metrics: Mapping[str, Union[Union[int, float, str, bool, None], None]] class TopMetricsAggregate(AttrDict[Any]): """ :arg top: (required) :arg meta: """ top: Sequence["TopMetrics"] meta: Mapping[str, Any] class TotalHits(AttrDict[Any]): """ :arg relation: (required) :arg value: (required) """ relation: Literal["eq", "gte"] value: int class UnmappedRareTermsAggregate(AttrDict[Any]): """ Result of a `rare_terms` aggregation when the field is unmapped. `buckets` is always empty. :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence[Any] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, Any]: return self.buckets # type: ignore[return-value] class UnmappedSamplerAggregate(AttrDict[Any]): """ :arg doc_count: (required) :arg meta: """ doc_count: int meta: Mapping[str, Any] class UnmappedSignificantTermsAggregate(AttrDict[Any]): """ Result of the `significant_terms` aggregation on an unmapped field. `buckets` is always empty. :arg bg_count: :arg doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ bg_count: int doc_count: int buckets: Sequence[Any] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, Any]: return self.buckets # type: ignore[return-value] class UnmappedTermsAggregate(AttrDict[Any]): """ Result of a `terms` aggregation when the field is unmapped. `buckets` is always empty. 
:arg doc_count_error_upper_bound: :arg sum_other_doc_count: :arg buckets: (required) the aggregation buckets as a list :arg meta: """ doc_count_error_upper_bound: int sum_other_doc_count: int buckets: Sequence[Any] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, Any]: return self.buckets # type: ignore[return-value] class ValueCountAggregate(AttrDict[Any]): """ Value count aggregation result. `value` is always present. :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] class VariableWidthHistogramAggregate(AttrDict[Any]): """ :arg buckets: (required) the aggregation buckets as a list :arg meta: """ buckets: Sequence["VariableWidthHistogramBucket"] meta: Mapping[str, Any] @property def buckets_as_dict(self) -> Mapping[str, "VariableWidthHistogramBucket"]: return self.buckets # type: ignore[return-value] class VariableWidthHistogramBucket(AttrDict[Any]): """ :arg min: (required) :arg key: (required) :arg max: (required) :arg doc_count: (required) :arg min_as_string: :arg key_as_string: :arg max_as_string: """ min: float key: float max: float doc_count: int min_as_string: str key_as_string: str max_as_string: str class WeightedAvgAggregate(AttrDict[Any]): """ Weighted average aggregation result. `value` is missing if the weight was set to zero. :arg value: (required) The metric value. A missing value generally means that there was no data to aggregate, unless specified otherwise. :arg value_as_string: :arg meta: """ value: Union[float, None] value_as_string: str meta: Mapping[str, Any] python-elasticsearch-9.1.1/elasticsearch/dsl/update_by_query.py000066400000000000000000000016301506101734100250200ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from ._async.update_by_query import AsyncUpdateByQuery # noqa: F401 from ._sync.update_by_query import UpdateByQuery # noqa: F401 python-elasticsearch-9.1.1/elasticsearch/dsl/update_by_query_base.py000066400000000000000000000115131506101734100260130ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, Dict, Type from typing_extensions import Self from .query import Bool, Q from .response import UpdateByQueryResponse from .search_base import ProxyDescriptor, QueryProxy, Request from .utils import _R, recursive_to_dict class UpdateByQueryBase(Request[_R]): query = ProxyDescriptor[Self]("query") def __init__(self, **kwargs: Any): """ Update by query request to Elasticsearch. :arg using: `Elasticsearch` instance to use :arg index: limit the search to index :arg doc_type: only query this type. All the parameters supplied (or omitted) at creation time can be later overridden by methods (`using`, `index` and `doc_type` respectively). """ super().__init__(**kwargs) self._response_class = UpdateByQueryResponse[_R] self._script: Dict[str, Any] = {} self._query_proxy = QueryProxy(self, "query") def filter(self, *args: Any, **kwargs: Any) -> Self: return self.query(Bool(filter=[Q(*args, **kwargs)])) def exclude(self, *args: Any, **kwargs: Any) -> Self: return self.query(Bool(filter=[~Q(*args, **kwargs)])) @classmethod def from_dict(cls, d: Dict[str, Any]) -> Self: """ Construct a new `UpdateByQuery` instance from a raw dict containing the search body. Useful when migrating from raw dictionaries. Example:: ubq = UpdateByQuery.from_dict({ "query": { "bool": { "must": [...] } }, "script": {...} }) ubq = ubq.filter('term', published=True) """ u = cls() u.update_from_dict(d) return u def _clone(self) -> Self: """ Return a clone of the current search request. Performs a shallow copy of all the underlying objects. Used internally by most state modifying APIs. """ ubq = super()._clone() ubq._response_class = self._response_class ubq._script = self._script.copy() ubq.query._proxied = self.query._proxied return ubq def response_class(self, cls: Type[UpdateByQueryResponse[_R]]) -> Self: """ Override the default wrapper used for the response. """ ubq = self._clone() ubq._response_class = cls return ubq def update_from_dict(self, d: Dict[str, Any]) -> Self: """ Apply options from a serialized body to the current instance. Modifies the object in-place. Used mostly by ``from_dict``. """ d = d.copy() if "query" in d: self.query._proxied = Q(d.pop("query")) if "script" in d: self._script = d.pop("script") self._extra.update(d) return self def script(self, **kwargs: Any) -> Self: """ Define the update action to take. See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting-using.html for more details. Note: the API only accepts a single script, so calling ``script`` multiple times will overwrite the previously set script. Example:: ubq = UpdateByQuery() ubq = ubq.script(source="ctx._source.likes++") ubq = ubq.script(source="ctx._source.likes += params.f", lang="expression", params={'f': 3}) """ ubq = self._clone() if ubq._script: ubq._script = {} ubq._script.update(kwargs) return ubq def to_dict(self, **kwargs: Any) -> Dict[str, Any]: """ Serialize the update by query request into the dictionary that will be sent over as the request's body. All additional keyword arguments will be included in the dictionary.
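A minimal sketch of the resulting body (illustrative, using a simple term query)::

    ubq = UpdateByQuery().query("term", published=True)
    ubq.to_dict(size=10)
    # {'query': {'term': {'published': True}}, 'size': 10}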
""" d = {} if self.query: d["query"] = self.query.to_dict() if self._script: d["script"] = self._script d.update(recursive_to_dict(self._extra)) d.update(recursive_to_dict(kwargs)) return d python-elasticsearch-9.1.1/elasticsearch/dsl/utils.py000066400000000000000000000547301506101734100227700ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import collections.abc from copy import copy from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generic, Iterable, Iterator, List, Mapping, Optional, Tuple, Type, Union, cast, ) from elastic_transport.client_utils import DEFAULT from typing_extensions import Self, TypeAlias, TypeVar from .exceptions import UnknownDslObject, ValidationException if TYPE_CHECKING: from elastic_transport import ObjectApiResponse from elasticsearch import AsyncElasticsearch, Elasticsearch from .document_base import DocumentOptions from .field import Field from .index_base import IndexBase from .response import Hit # noqa: F401 from .types import Hit as HitBaseType UsingType: TypeAlias = Union[str, "Elasticsearch"] AsyncUsingType: TypeAlias = Union[str, "AsyncElasticsearch"] AnyUsingType: TypeAlias = Union[str, "Elasticsearch", "AsyncElasticsearch"] _ValT = TypeVar("_ValT") # used by AttrDict _R = TypeVar("_R", default="Hit") # used by Search and Response classes SKIP_VALUES = ("", None) EXPAND__TO_DOT = True DOC_META_FIELDS = frozenset( ( "id", "routing", ) ) META_FIELDS = frozenset( ( # Elasticsearch metadata fields, except 'type' "index", "using", "score", "version", "seq_no", "primary_term", ) ).union(DOC_META_FIELDS) def _wrap(val: Any, obj_wrapper: Optional[Callable[[Any], Any]] = None) -> Any: if isinstance(val, dict): return AttrDict(val) if obj_wrapper is None else obj_wrapper(val) if isinstance(val, list): return AttrList(val) return val def _recursive_to_dict(value: Any) -> Any: if hasattr(value, "to_dict"): return value.to_dict() elif isinstance(value, dict) or isinstance(value, AttrDict): return {k: _recursive_to_dict(v) for k, v in value.items()} elif isinstance(value, list) or isinstance(value, AttrList): return [recursive_to_dict(elem) for elem in value] else: return value class AttrList(Generic[_ValT]): def __init__( self, l: List[_ValT], obj_wrapper: Optional[Callable[[_ValT], Any]] = None ): # make iterables into lists if not isinstance(l, list): l = list(l) self._l_ = l self._obj_wrapper = obj_wrapper def __repr__(self) -> str: return repr(self._l_) def __eq__(self, other: Any) -> bool: if isinstance(other, AttrList): return other._l_ == self._l_ # make sure we still equal to a dict with the same data return bool(other == self._l_) def __ne__(self, other: Any) -> bool: return not self == other def __getitem__(self, k: Union[int, slice]) -> Any: l = self._l_[k] if isinstance(k, slice): return 
AttrList[_ValT](l, obj_wrapper=self._obj_wrapper) # type: ignore[arg-type] return _wrap(l, self._obj_wrapper) def __setitem__(self, k: int, value: _ValT) -> None: self._l_[k] = value def __iter__(self) -> Iterator[Any]: return map(lambda i: _wrap(i, self._obj_wrapper), self._l_) def __len__(self) -> int: return len(self._l_) def __nonzero__(self) -> bool: return bool(self._l_) __bool__ = __nonzero__ def __getattr__(self, name: str) -> Any: return getattr(self._l_, name) def __getstate__(self) -> Tuple[List[_ValT], Optional[Callable[[_ValT], Any]]]: return self._l_, self._obj_wrapper def __setstate__( self, state: Tuple[List[_ValT], Optional[Callable[[_ValT], Any]]] ) -> None: self._l_, self._obj_wrapper = state def to_list(self) -> List[_ValT]: return self._l_ class AttrDict(Generic[_ValT]): """ Helper class to provide attribute-like access (read and write) to dictionaries. Used to provide a convenient way to access both results and nested dsl dicts. """ _d_: Dict[str, _ValT] RESERVED: Dict[str, str] = {"from_": "from"} def __init__(self, d: Dict[str, _ValT]): # assign the inner dict manually to prevent __setattr__ from firing super().__setattr__("_d_", d) def __contains__(self, key: object) -> bool: return key in self._d_ def __nonzero__(self) -> bool: return bool(self._d_) __bool__ = __nonzero__ def __dir__(self) -> List[str]: # introspection for auto-complete in IPython etc return list(self._d_.keys()) def __eq__(self, other: Any) -> bool: if isinstance(other, AttrDict): return other._d_ == self._d_ # make sure we still equal to a dict with the same data return bool(other == self._d_) def __ne__(self, other: Any) -> bool: return not self == other def __repr__(self) -> str: r = repr(self._d_) if len(r) > 60: r = r[:60] + "...}" return r def __getstate__(self) -> Tuple[Dict[str, _ValT]]: return (self._d_,) def __setstate__(self, state: Tuple[Dict[str, _ValT]]) -> None: super().__setattr__("_d_", state[0]) def __getattr__(self, attr_name: str) -> Any: try: return self.__getitem__(attr_name) except KeyError: raise AttributeError( f"{self.__class__.__name__!r} object has no attribute {attr_name!r}" ) def __delattr__(self, attr_name: str) -> None: try: del self._d_[self.RESERVED.get(attr_name, attr_name)] except KeyError: raise AttributeError( f"{self.__class__.__name__!r} object has no attribute {attr_name!r}" ) def __getitem__(self, key: str) -> Any: return _wrap(self._d_[self.RESERVED.get(key, key)]) def __setitem__(self, key: str, value: _ValT) -> None: self._d_[self.RESERVED.get(key, key)] = value def __delitem__(self, key: str) -> None: del self._d_[self.RESERVED.get(key, key)] def __setattr__(self, name: str, value: _ValT) -> None: # the __orig_class__ attribute has to be treated as an exception, as # it is added to an object when it is instantiated with type arguments if ( name in self._d_ or not hasattr(self.__class__, name) ) and name != "__orig_class__": self._d_[self.RESERVED.get(name, name)] = value else: # there is an attribute on the class (could be property, ..)
- don't add it as field super().__setattr__(name, value) def __iter__(self) -> Iterator[str]: return iter(self._d_) def to_dict(self, recursive: bool = False) -> Dict[str, _ValT]: return cast( Dict[str, _ValT], _recursive_to_dict(self._d_) if recursive else self._d_ ) def keys(self) -> Iterable[str]: return self._d_.keys() def items(self) -> Iterable[Tuple[str, _ValT]]: return self._d_.items() class DslMeta(type): """ Base Metaclass for DslBase subclasses that builds a registry of all classes for a given DslBase subclass (== all the query types for the Query subclass of DslBase). It then uses the information from that registry (as well as `name` and `shortcut` attributes from the base class) to construct any subclass based on its name. For typical use see `QueryMeta` and `Query` in `elasticsearch.dsl.query`. """ name: str _classes: Dict[str, type] _type_name: str _types: ClassVar[Dict[str, Type["DslBase"]]] = {} def __init__(cls, name: str, bases: Tuple[type, ...], attrs: Dict[str, Any]): super().__init__(name, bases, attrs) # skip for DslBase if not hasattr(cls, "_type_shortcut"): return if not cls.name: # abstract base class, register its shortcut cls._types[cls._type_name] = cls._type_shortcut # and create a registry for subclasses if not hasattr(cls, "_classes"): cls._classes = {} elif cls.name not in cls._classes: # normal class, register it cls._classes[cls.name] = cls @classmethod def get_dsl_type(cls, name: str) -> Type["DslBase"]: try: return cls._types[name] except KeyError: raise UnknownDslObject(f"DSL type {name} does not exist.") class DslBase(metaclass=DslMeta): """ Base class for all DSL objects - queries, filters, aggregations etc. Wraps a dictionary representing the object's json. Provides several features: - attribute access to the wrapped dictionary (.field instead of ['field']) - _clone method returning a copy of self - to_dict method to serialize into dict (to be sent via elasticsearch-py) - basic logical operators (&, | and ~) using a Bool(Filter|Query) TODO: move into a class specific for Query/Filter - respects the definition of the class and (de)serializes its attributes based on the `_param_defs` definition (for example turning all values in the `must` attribute into Query objects) """ _param_defs: ClassVar[Dict[str, Dict[str, Union[str, bool]]]] = {} @classmethod def get_dsl_class( cls: Type[Self], name: str, default: Optional[str] = None ) -> Type[Self]: try: return cls._classes[name] except KeyError: if default is not None: return cls._classes[default] raise UnknownDslObject( f"DSL class `{name}` does not exist in {cls._type_name}."
) def __init__(self, _expand__to_dot: Optional[bool] = None, **params: Any) -> None: if _expand__to_dot is None: _expand__to_dot = EXPAND__TO_DOT self._params: Dict[str, Any] = {} for pname, pvalue in params.items(): if pvalue is DEFAULT: continue # expand "__" to dots if "__" in pname and _expand__to_dot: pname = pname.replace("__", ".") # convert instrumented fields to string if type(pvalue).__name__ == "InstrumentedField": pvalue = str(pvalue) self._setattr(pname, pvalue) def _repr_params(self) -> str: """Produce a repr of all our parameters to be used in __repr__.""" return ", ".join( f"{n.replace('.', '__')}={v!r}" for (n, v) in sorted(self._params.items()) # make sure we don't include empty typed params if "type" not in self._param_defs.get(n, {}) or v ) def __repr__(self) -> str: return f"{self.__class__.__name__}({self._repr_params()})" def __eq__(self, other: Any) -> bool: return isinstance(other, self.__class__) and other.to_dict() == self.to_dict() def __ne__(self, other: Any) -> bool: return not self == other def __setattr__(self, name: str, value: Any) -> None: if name.startswith("_"): return super().__setattr__(name, value) return self._setattr(name, value) def _setattr(self, name: str, value: Any) -> None: # if this attribute has special type assigned to it... name = AttrDict.RESERVED.get(name, name) if name in self._param_defs: pinfo = self._param_defs[name] if "type" in pinfo: # get the shortcut used to construct this type (query.Q, aggs.A, etc) shortcut = self.__class__.get_dsl_type(str(pinfo["type"])) # list of dict(name -> DslBase) if pinfo.get("multi") and pinfo.get("hash"): if not isinstance(value, (tuple, list)): value = (value,) value = list( {k: shortcut(v) for (k, v) in obj.items()} for obj in value ) elif pinfo.get("multi"): if not isinstance(value, (tuple, list)): value = (value,) value = list(map(shortcut, value)) # dict(name -> DslBase), make sure we pickup all the objs elif pinfo.get("hash"): value = {k: shortcut(v) for (k, v) in value.items()} # single value object, just convert else: value = shortcut(value) self._params[name] = value def __getattr__(self, name: str) -> Any: if name.startswith("_"): raise AttributeError( f"{self.__class__.__name__!r} object has no attribute {name!r}" ) value = None try: value = self._params[name] except KeyError: # compound types should never throw AttributeError and return empty # container instead if name in self._param_defs: pinfo = self._param_defs[name] if pinfo.get("multi"): value = self._params.setdefault(name, []) elif pinfo.get("hash"): value = self._params.setdefault(name, {}) if value is None: raise AttributeError( f"{self.__class__.__name__!r} object has no attribute {name!r}" ) # wrap nested dicts in AttrDict for convenient access if isinstance(value, dict): return AttrDict(value) return value def to_dict(self) -> Dict[str, Any]: """ Serialize the DSL object to plain dict """ d = {} for pname, value in self._params.items(): pinfo = self._param_defs.get(pname) # typed param if pinfo and "type" in pinfo: # don't serialize empty lists and dicts for typed fields if value in ({}, []): continue # list of dict(name -> DslBase) if pinfo.get("multi") and pinfo.get("hash"): value = list( {k: v.to_dict() for k, v in obj.items()} for obj in value ) # multi-values are serialized as list of dicts elif pinfo.get("multi"): value = list(map(lambda x: x.to_dict(), value)) # squash all the hash values into one dict elif pinfo.get("hash"): value = {k: v.to_dict() for k, v in value.items()} # serialize single values else: value = 
value.to_dict() # serialize anything with to_dict method elif hasattr(value, "to_dict"): value = value.to_dict() d[pname] = value return {self.name: d} def _clone(self) -> Self: c = self.__class__() for attr in self._params: c._params[attr] = copy(self._params[attr]) return c if TYPE_CHECKING: HitMetaBase = HitBaseType else: HitMetaBase = AttrDict[Any] class HitMeta(HitMetaBase): inner_hits: Mapping[str, Any] def __init__( self, document: Dict[str, Any], exclude: Tuple[str, ...] = ("_source", "_fields"), ): d = { k[1:] if k.startswith("_") else k: v for (k, v) in document.items() if k not in exclude } if "type" in d: # make sure we are consistent everywhere in python d["doc_type"] = d.pop("type") super().__init__(d) class ObjectBase(AttrDict[Any]): _doc_type: "DocumentOptions" _index: "IndexBase" meta: HitMeta def __init__(self, meta: Optional[Dict[str, Any]] = None, **kwargs: Any): meta = meta or {} for k in list(kwargs): if k.startswith("_") and k[1:] in META_FIELDS: meta[k] = kwargs.pop(k) super(AttrDict, self).__setattr__("meta", HitMeta(meta)) # process field defaults if hasattr(self, "_defaults"): for name in self._defaults: if name not in kwargs: value = self._defaults[name] if callable(value): value = value() kwargs[name] = value super().__init__(kwargs) @classmethod def __list_fields(cls) -> Iterator[Tuple[str, "Field", bool]]: """ Get all the fields defined for our class, if we have an Index, try looking at the index mappings as well, mark the fields from Index as optional. """ for name in cls._doc_type.mapping: field = cls._doc_type.mapping[name] yield name, field, False if hasattr(cls.__class__, "_index"): if not cls._index._mapping: return for name in cls._index._mapping: # don't return fields that are in _doc_type if name in cls._doc_type.mapping: continue field = cls._index._mapping[name] yield name, field, True @classmethod def __get_field(cls, name: str) -> Optional["Field"]: try: return cls._doc_type.mapping[name] except KeyError: # fallback to fields on the Index if hasattr(cls, "_index") and cls._index._mapping: try: return cls._index._mapping[name] except KeyError: pass return None @classmethod def from_es(cls, hit: Union[Dict[str, Any], "ObjectApiResponse[Any]"]) -> Self: meta = hit.copy() data = meta.pop("_source", {}) doc = cls(meta=meta) doc._from_dict(data) return doc def _from_dict(self, data: Dict[str, Any]) -> None: for k, v in data.items(): f = self.__get_field(k) if f and f._coerce: v = f.deserialize(v) setattr(self, k, v) def __getstate__(self) -> Tuple[Dict[str, Any], Dict[str, Any]]: # type: ignore[override] return self.to_dict(), self.meta._d_ def __setstate__(self, state: Tuple[Dict[str, Any], Dict[str, Any]]) -> None: # type: ignore[override] data, meta = state super(AttrDict, self).__setattr__("_d_", {}) super(AttrDict, self).__setattr__("meta", HitMeta(meta)) self._from_dict(data) def __getattr__(self, name: str) -> Any: try: return super().__getattr__(name) except AttributeError: f = self.__get_field(name) if f is not None and hasattr(f, "empty"): value = f.empty() if value not in SKIP_VALUES: setattr(self, name, value) value = getattr(self, name) return value raise def __setattr__(self, name: str, value: Any) -> None: if name in self.__class__._doc_type.mapping: self._d_[name] = value else: super().__setattr__(name, value) def to_dict(self, skip_empty: bool = True) -> Dict[str, Any]: out = {} for k, v in self._d_.items(): # if this is a mapped field, f = self.__get_field(k) if f and f._coerce: v = f.serialize(v, skip_empty=skip_empty) # if someone 
assigned AttrList, unwrap it if isinstance(v, AttrList): v = v._l_ if skip_empty: # don't serialize empty values # careful not to include numeric zeros if v in ([], {}, None): continue out[k] = v return out def clean_fields(self, validate: bool = True) -> None: errors: Dict[str, List[ValidationException]] = {} for name, field, optional in self.__list_fields(): data = self._d_.get(name, None) if data is None and optional: continue try: # save the cleaned value data = field.clean(data) except ValidationException as e: errors.setdefault(name, []).append(e) if name in self._d_ or data not in ([], {}, None): self._d_[name] = cast(Any, data) if validate and errors: raise ValidationException(errors) def clean(self) -> None: pass def full_clean(self) -> None: self.clean_fields(validate=False) self.clean() self.clean_fields(validate=True) def merge( data: Union[Dict[str, Any], AttrDict[Any]], new_data: Union[Dict[str, Any], AttrDict[Any]], raise_on_conflict: bool = False, ) -> None: if not ( isinstance(data, (AttrDict, collections.abc.Mapping)) and isinstance(new_data, (AttrDict, collections.abc.Mapping)) ): raise ValueError( f"You can only merge two dicts! Got {data!r} and {new_data!r} instead." ) for key, value in new_data.items(): if ( key in data and isinstance(data[key], (AttrDict, collections.abc.Mapping)) and isinstance(value, (AttrDict, collections.abc.Mapping)) ): merge(data[key], value, raise_on_conflict) # type: ignore[arg-type] elif key in data and data[key] != value and raise_on_conflict: raise ValueError(f"Incompatible data for key {key!r}, cannot be merged.") else: data[key] = value def recursive_to_dict(data: Any) -> Any: """Recursively transform objects that potentially have .to_dict() into dictionary literals by traversing AttrList, AttrDict, list, tuple, and Mapping types. """ if isinstance(data, AttrList): data = list(data._l_) elif hasattr(data, "to_dict"): data = data.to_dict() if isinstance(data, (list, tuple)): return type(data)(recursive_to_dict(inner) for inner in data) elif isinstance(data, dict): return {key: recursive_to_dict(val) for key, val in data.items()} return data python-elasticsearch-9.1.1/elasticsearch/dsl/wrappers.py000066400000000000000000000100031506101734100234540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
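# Usage sketch of the ``Range`` wrapper defined below (illustrative values,
# not part of the original module): ``Range`` stores the comparison operators
# of a range and supports membership tests and bound accessors.
#
#     rng = Range(gte=2, lt=10)
#     assert 2 in rng and 10 not in rng  # bounds follow gte/lt semantics
#     assert rng.lower == (2, True)      # (value, inclusive?)
#     assert rng.upper == (10, False)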
import operator from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, Literal, Mapping, Optional, Tuple, TypeVar, Union, cast, ) if TYPE_CHECKING: from _operator import _SupportsComparison from typing_extensions import TypeAlias from .utils import AttrDict ComparisonOperators: TypeAlias = Literal["lt", "lte", "gt", "gte"] RangeValT = TypeVar("RangeValT", bound="_SupportsComparison") __all__ = ["Range"] class Range(AttrDict[RangeValT]): OPS: ClassVar[ Mapping[ ComparisonOperators, Callable[["_SupportsComparison", "_SupportsComparison"], bool], ] ] = { "lt": operator.lt, "lte": operator.le, "gt": operator.gt, "gte": operator.ge, } def __init__( self, d: Optional[Dict[str, RangeValT]] = None, /, **kwargs: RangeValT, ): if d is not None and (kwargs or not isinstance(d, dict)): raise ValueError( "Range accepts a single dictionary or a set of keyword arguments." ) if d is None: data = kwargs else: data = d for k in data: if k not in self.OPS: raise ValueError(f"Range received an unknown operator {k!r}") if "gt" in data and "gte" in data: raise ValueError("You cannot specify both gt and gte for Range.") if "lt" in data and "lte" in data: raise ValueError("You cannot specify both lt and lte for Range.") super().__init__(data) def __repr__(self) -> str: return "Range(%s)" % ", ".join("%s=%r" % op for op in self._d_.items()) def __contains__(self, item: object) -> bool: if isinstance(item, str): return super().__contains__(item) item_supports_comp = any(hasattr(item, f"__{op}__") for op in self.OPS) if not item_supports_comp: return False for op in self.OPS: if op in self._d_ and not self.OPS[op]( cast("_SupportsComparison", item), self._d_[op] ): return False return True @property def upper(self) -> Union[Tuple[RangeValT, bool], Tuple[None, Literal[False]]]: if "lt" in self._d_: return self._d_["lt"], False if "lte" in self._d_: return self._d_["lte"], True return None, False @property def lower(self) -> Union[Tuple[RangeValT, bool], Tuple[None, Literal[False]]]: if "gt" in self._d_: return self._d_["gt"], False if "gte" in self._d_: return self._d_["gte"], True return None, False class AggregationRange(AttrDict[Any]): """ :arg from: Start of the range (inclusive). :arg key: Custom key to return the range with. :arg to: End of the range (exclusive). """ def __init__( self, *, from_: Any = None, key: Optional[str] = None, to: Any = None, **kwargs: Any, ): if from_ is not None: kwargs["from_"] = from_ if key is not None: kwargs["key"] = key if to is not None: kwargs["to"] = to super().__init__(kwargs) python-elasticsearch-9.1.1/elasticsearch/esql/000077500000000000000000000000001506101734100214275ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/esql/__init__.py000066400000000000000000000015661506101734100235500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. from ..dsl import E # noqa: F401 from .esql import ESQL, ESQLBase, and_, not_, or_ # noqa: F401 python-elasticsearch-9.1.1/elasticsearch/esql/esql.py000066400000000000000000001246711506101734100227560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json import re from abc import ABC, abstractmethod from typing import Any, Dict, Optional, Tuple, Type, Union from ..dsl.document_base import DocumentBase, InstrumentedExpression, InstrumentedField FieldType = Union[InstrumentedField, str] IndexType = Union[Type[DocumentBase], str] ExpressionType = Any class ESQL(ABC): """The static methods of the ``ESQL`` class provide access to the ES|QL source commands, used to create ES|QL queries. These methods return an instance of class ``ESQLBase``, which provides access to the ES|QL processing commands. """ @staticmethod def from_(*indices: IndexType) -> "From": """The ``FROM`` source command returns a table with data from a data stream, index, or alias. :param indices: A list of indices, data streams or aliases. Supports wildcards and date math. Examples:: query1 = ESQL.from_("employees") query2 = ESQL.from_("<logs-{now/d}>") query3 = ESQL.from_("employees-00001", "other-employees-*") query4 = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*") query5 = ESQL.from_("employees").metadata("_id") """ return From(*indices) @staticmethod def row(**params: ExpressionType) -> "Row": """The ``ROW`` source command produces a row with one or more columns with values that you specify. This can be useful for testing. :param params: the column values to produce, given as keyword arguments. Examples:: query1 = ESQL.row(a=1, b="two", c=None) query2 = ESQL.row(a=[1, 2]) query3 = ESQL.row(a=functions.round(1.23, 0)) """ return Row(**params) @staticmethod def show(item: str) -> "Show": """The ``SHOW`` source command returns information about the deployment and its capabilities. :param item: Can only be ``INFO``. Examples:: query = ESQL.show("INFO") """ return Show(item) @staticmethod def branch() -> "Branch": """This method can only be used inside a ``FORK`` command to create each branch. Examples:: query = ESQL.from_("employees").fork( ESQL.branch().where("emp_no == 10001"), ESQL.branch().where("emp_no == 10002"), ) """ return Branch() class ESQLBase(ABC): """The methods of the ``ESQLBase`` class provide access to the ES|QL processing commands, used to build ES|QL queries.
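Processing commands chain into a pipeline; for example (a minimal sketch composed only from the commands documented below)::

    query = (
        ESQL.from_("employees")
        .where("still_hired == true")
        .sort("height DESC")
        .keep("first_name", "last_name", "height")
        .limit(5)
    )
    print(query)  # repr() renders the textual ES|QL query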
""" def __init__(self, parent: Optional["ESQLBase"] = None): self._parent = parent def __repr__(self) -> str: return self.render() def render(self) -> str: return ( self._parent.render() + "\n| " if self._parent else "" ) + self._render_internal() @abstractmethod def _render_internal(self) -> str: pass @staticmethod def _format_index(index: IndexType) -> str: return index._index._name if hasattr(index, "_index") else str(index) @staticmethod def _format_id(id: FieldType, allow_patterns: bool = False) -> str: s = str(id) # in case it is an InstrumentedField if allow_patterns and "*" in s: return s # patterns cannot be escaped if re.fullmatch(r"[a-zA-Z_@][a-zA-Z0-9_\.]*", s): return s # this identifier needs to be escaped s.replace("`", "``") return f"`{s}`" @staticmethod def _format_expr(expr: ExpressionType) -> str: return ( json.dumps(expr) if not isinstance(expr, (str, InstrumentedExpression)) else str(expr) ) def _is_forked(self) -> bool: if self.__class__.__name__ == "Fork": return True if self._parent: return self._parent._is_forked() return False def change_point(self, value: FieldType) -> "ChangePoint": """`CHANGE_POINT` detects spikes, dips, and change points in a metric. :param value: The column with the metric in which you want to detect a change point. Examples:: query = ( ESQL.row(key=list(range(1, 26))) .mv_expand("key") .eval(value=functions.case("key<13", 0, 42)) .change_point("value") .on("key") .where("type IS NOT NULL") ) """ return ChangePoint(self, value) def completion( self, *prompt: ExpressionType, **named_prompt: ExpressionType ) -> "Completion": """The `COMPLETION` command allows you to send prompts and context to a Large Language Model (LLM) directly within your ES|QL queries, to perform text generation tasks. :param prompt: The input text or expression used to prompt the LLM. This can be a string literal or a reference to a column containing text. :param named_prompt: The input text or expresion, given as a keyword argument. The argument name is used for the column name. If not specified, the results will be stored in a column named `completion`. If the specified column already exists, it will be overwritten with the new results. Examples:: query1 = ( ESQL.row(question="What is Elasticsearch?") .completion("question").with_("test_completion_model") .keep("question", "completion") ) query2 = ( ESQL.row(question="What is Elasticsearch?") .completion(answer="question").with_("test_completion_model") .keep("question", "answer") ) query3 = ( ESQL.from_("movies") .sort("rating DESC") .limit(10) .eval(prompt=\"\"\"CONCAT( "Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n", )\"\"\") .completion(summary="prompt").with_("test_completion_model") .keep("title", "summary", "rating") ) """ return Completion(self, *prompt, **named_prompt) def dissect(self, input: FieldType, pattern: str) -> "Dissect": """``DISSECT`` enables you to extract structured data out of a string. :param input: The column that contains the string you want to structure. If the column has multiple values, ``DISSECT`` will process each value. :param pattern: A dissect pattern. If a field name conflicts with an existing column, the existing column is dropped. If a field name is used more than once, only the rightmost duplicate creates a column. 
Examples:: query = ( ESQL.row(a="2023-01-23T12:15:00.000Z - some text - 127.0.0.1") .dissect("a", "%{date} - %{msg} - %{ip}") .keep("date", "msg", "ip") .eval(date="TO_DATETIME(date)") ) """ return Dissect(self, input, pattern) def drop(self, *columns: FieldType) -> "Drop": """The ``DROP`` processing command removes one or more columns. :param columns: The columns to drop, given as positional arguments. Supports wildcards. Examples:: query1 = ESQL.from_("employees").drop("height") query2 = ESQL.from_("employees").drop("height*") """ return Drop(self, *columns) def enrich(self, policy: str) -> "Enrich": """``ENRICH`` enables you to add data from existing indices as new columns using an enrich policy. :param policy: The name of the enrich policy. You need to create and execute the enrich policy first. Examples:: query1 = ( ESQL.row(a="1") .enrich("languages_policy").on("a").with_("language_name") ) query2 = ( ESQL.row(a="1") .enrich("languages_policy").on("a").with_(name="language_name") ) """ return Enrich(self, policy) def eval(self, *columns: ExpressionType, **named_columns: ExpressionType) -> "Eval": """The ``EVAL`` processing command enables you to append new columns with calculated values. :param columns: The values for the columns, given as positional arguments. Can be literals, expressions, or functions. Can use columns defined to the left of this one. :param named_columns: The values for the new columns, given as keyword arguments. The names of the arguments are used as column names. If a column with the same name already exists, the existing column is dropped. If a column name is used more than once, only the rightmost duplicate creates a column. Examples:: query1 = ( ESQL.from_("employees") .sort("emp_no") .keep("first_name", "last_name", "height") .eval(height_feet="height * 3.281", height_cm="height * 100") ) query2 = ( ESQL.from_("employees") .eval("height * 3.281") .stats(avg_height_feet=functions.avg("`height * 3.281`")) ) """ return Eval(self, *columns, **named_columns) def fork( self, fork1: "ESQLBase", fork2: Optional["ESQLBase"] = None, fork3: Optional["ESQLBase"] = None, fork4: Optional["ESQLBase"] = None, fork5: Optional["ESQLBase"] = None, fork6: Optional["ESQLBase"] = None, fork7: Optional["ESQLBase"] = None, fork8: Optional["ESQLBase"] = None, ) -> "Fork": """The ``FORK`` processing command creates multiple execution branches to operate on the same input data and combines the results in a single output table. :param fork: Up to 8 execution branches, created with the ``ESQL.branch()`` method. Examples:: query = ( ESQL.from_("employees") .fork( ESQL.branch().where("emp_no == 10001"), ESQL.branch().where("emp_no == 10002"), ) .keep("emp_no", "_fork") .sort("emp_no") ) """ if self._is_forked(): raise ValueError("a query can only have one fork") return Fork(self, fork1, fork2, fork3, fork4, fork5, fork6, fork7, fork8) def grok(self, input: FieldType, pattern: str) -> "Grok": """``GROK`` enables you to extract structured data out of a string. :param input: The column that contains the string you want to structure. If the column has multiple values, ``GROK`` will process each value. :param pattern: A grok pattern. If a field name conflicts with an existing column, the existing column is discarded. If a field name is used more than once, a multi-valued column will be created with one value for each occurrence of the field name.
Examples:: query1 = ( ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") .grok("a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}") .keep("date", "ip", "email", "num") ) query2 = ( ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") .grok( "a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}", ) .keep("date", "ip", "email", "num") .eval(date=functions.to_datetime("date")) ) query3 = ( ESQL.from_("addresses") .keep("city.name", "zip_code") .grok("zip_code", "%{WORD:zip_parts} %{WORD:zip_parts}") ) """ return Grok(self, input, pattern) def keep(self, *columns: FieldType) -> "Keep": """The ``KEEP`` processing command enables you to specify what columns are returned and the order in which they are returned. :param columns: The columns to keep, given as positional arguments. Supports wildcards. Examples:: query1 = ESQL.from_("employees").keep("emp_no", "first_name", "last_name", "height") query2 = ESQL.from_("employees").keep("h*") query3 = ESQL.from_("employees").keep("h*", "*") """ return Keep(self, *columns) def limit(self, max_number_of_rows: int) -> "Limit": """The ``LIMIT`` processing command enables you to limit the number of rows that are returned. :param max_number_of_rows: The maximum number of rows to return. Examples:: query1 = ESQL.from_("employees").sort("emp_no ASC").limit(5) query2 = ESQL.from_("index").stats(functions.avg("field1")).by("field2").limit(20000) """ return Limit(self, max_number_of_rows) def lookup_join(self, lookup_index: IndexType) -> "LookupJoin": """`LOOKUP JOIN` enables you to add data from another index, AKA a 'lookup' index, to your ES|QL query results, simplifying data enrichment and analysis workflows. :param lookup_index: The name of the lookup index. This must be a specific index name - wildcards, aliases, and remote cluster references are not supported. Indices used for lookups must be configured with the lookup index mode. Examples:: query1 = ( ESQL.from_("firewall_logs") .lookup_join("threat_list").on("source.IP") .where("threat_level IS NOT NULL") ) query2 = ( ESQL.from_("system_metrics") .lookup_join("host_inventory").on("host.name") .lookup_join("ownerships").on("host.name") ) query3 = ( ESQL.from_("app_logs") .lookup_join("service_owners").on("service_id") ) query4 = ( ESQL.from_("employees") .eval(language_code="languages") .where("emp_no >= 10091 AND emp_no < 10094") .lookup_join("languages_lookup").on("language_code") ) """ return LookupJoin(self, lookup_index) def mv_expand(self, column: FieldType) -> "MvExpand": """The `MV_EXPAND` processing command expands multivalued columns into one row per value, duplicating other columns. :param column: The multivalued column to expand. Examples:: query = ESQL.row(a=[1, 2, 3], b="b", j=["a", "b"]).mv_expand("a") """ return MvExpand(self, column) def rename(self, **columns: FieldType) -> "Rename": """The ``RENAME`` processing command renames one or more columns. :param columns: The old and new column name pairs, given as keyword arguments. If a name conflicts with an existing column name, the existing column is dropped. If multiple columns are renamed to the same name, all but the rightmost column with the same new name are dropped. Examples:: query = ( ESQL.from_("employees") .keep("first_name", "last_name", "still_hired") .rename(still_hired="employed") ) """ return Rename(self, **columns) def sample(self, probability: float) -> "Sample": """The ``SAMPLE`` command samples a fraction of the table rows. 
        :param probability: The probability that a row is included in the sample.
            The value must be between 0 and 1, exclusive.

        Examples::

            query = ESQL.from_("employees").keep("emp_no").sample(0.05)
        """
        return Sample(self, probability)

    def sort(self, *columns: ExpressionType) -> "Sort":
        """The ``SORT`` processing command sorts a table on one or more columns.

        :param columns: The columns to sort on.

        Examples::

            query1 = (
                ESQL.from_("employees")
                .keep("first_name", "last_name", "height")
                .sort("height")
            )

            query2 = (
                ESQL.from_("employees")
                .keep("first_name", "last_name", "height")
                .sort("height DESC")
            )

            query3 = (
                ESQL.from_("employees")
                .keep("first_name", "last_name", "height")
                .sort("height DESC", "first_name ASC")
            )

            query4 = (
                ESQL.from_("employees")
                .keep("first_name", "last_name", "height")
                .sort("first_name ASC NULLS FIRST")
            )
        """
        return Sort(self, *columns)

    def stats(
        self, *expressions: ExpressionType, **named_expressions: ExpressionType
    ) -> "Stats":
        """The ``STATS`` processing command groups rows according to a common value
        and calculates one or more aggregated values over the grouped rows.

        :param expressions: A list of expressions, given as positional arguments.
        :param named_expressions: A list of expressions, given as keyword arguments.
            The argument names are used for the returned aggregated values.

        Note that only one of `expressions` and `named_expressions` can be
        provided; they cannot be combined.

        Examples::

            query1 = (
                ESQL.from_("employees")
                .stats(count=functions.count("emp_no")).by("languages")
                .sort("languages")
            )

            query2 = (
                ESQL.from_("employees")
                .stats(avg_lang=functions.avg("languages"))
            )

            query3 = (
                ESQL.from_("employees")
                .stats(
                    avg_lang=functions.avg("languages"),
                    max_lang=functions.max("languages")
                )
            )

            query4 = (
                ESQL.from_("employees")
                .stats(
                    avg50s=functions.avg("salary").where('birth_date < "1960-01-01"'),
                    avg60s=functions.avg("salary").where('birth_date >= "1960-01-01"'),
                ).by("gender")
                .sort("gender")
            )

            query5 = (
                ESQL.from_("employees")
                .eval(Ks="salary / 1000")
                .stats(
                    under_40K=functions.count("*").where("Ks < 40"),
                    inbetween=functions.count("*").where("40 <= Ks AND Ks < 60"),
                    over_60K=functions.count("*").where("60 <= Ks"),
                    total=functions.count("*")
                )
            )

            query6 = (
                ESQL.row(i=1, a=["a", "b"])
                .stats(functions.min("i")).by("a")
                .sort("a ASC")
            )

            query7 = (
                ESQL.from_("employees")
                .eval(hired=functions.date_format("hire_date", "yyyy"))
                .stats(avg_salary=functions.avg("salary")).by("hired", "languages.long")
                .eval(avg_salary=functions.round("avg_salary"))
                .sort("hired", "languages.long")
            )
        """
        return Stats(self, *expressions, **named_expressions)

    def where(self, *expressions: ExpressionType) -> "Where":
        """The ``WHERE`` processing command produces a table that contains all the
        rows from the input table for which the provided condition evaluates to
        `true`.

        :param expressions: A list of boolean expressions, given as positional
            arguments. These expressions are combined with an ``AND`` logical
            operator.

        Examples::

            query1 = (
                ESQL.from_("employees")
                .keep("first_name", "last_name", "still_hired")
                .where("still_hired == true")
            )

            query2 = (
                ESQL.from_("sample_data")
                .where("@timestamp > NOW() - 1 hour")
            )

            query3 = (
                ESQL.from_("employees")
                .keep("first_name", "last_name", "height")
                .where("LENGTH(first_name) < 4")
            )
        """
        return Where(self, *expressions)


class From(ESQLBase):
    """Implementation of the ``FROM`` source command.

    This class inherits from :class:`ESQLBase`, to make it possible to
    chain all the commands that belong to an ES|QL query in a single expression.
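
    A minimal usage sketch (the index and metadata field names are illustrative)::

        query = ESQL.from_("employees").metadata("_index", "_id")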
""" def __init__(self, *indices: IndexType): super().__init__() self._indices = indices self._metadata_fields: Tuple[FieldType, ...] = tuple() def metadata(self, *fields: FieldType) -> "From": """Continuation of the ``FROM`` source command. :param fields: metadata fields to retrieve, given as positional arguments. """ self._metadata_fields = fields return self def _render_internal(self) -> str: indices = [self._format_index(index) for index in self._indices] s = f'{self.__class__.__name__.upper()} {", ".join(indices)}' if self._metadata_fields: s = ( s + f' METADATA {", ".join([self._format_id(field) for field in self._metadata_fields])}' ) return s class Row(ESQLBase): """Implementation of the ``ROW`` source command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, **params: ExpressionType): super().__init__() self._params = { self._format_id(k): ( json.dumps(v) if not isinstance(v, InstrumentedExpression) else self._format_expr(v) ) for k, v in params.items() } def _render_internal(self) -> str: return "ROW " + ", ".join([f"{k} = {v}" for k, v in self._params.items()]) class Show(ESQLBase): """Implementation of the ``SHOW`` source command. This class inherits from :class:`ESQLBase `, which makes it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, item: str): super().__init__() self._item = item def _render_internal(self) -> str: return f"SHOW {self._format_id(self._item)}" class Branch(ESQLBase): """Implementation of a branch inside a ``FORK`` processing command. This class inherits from :class:`ESQLBase `, which makes it possible to chain all the commands that belong to the branch in a single expression. """ def _render_internal(self) -> str: return "" class ChangePoint(ESQLBase): """Implementation of the ``CHANGE POINT`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, value: FieldType): super().__init__(parent) self._value = value self._key: Optional[FieldType] = None self._type_name: Optional[str] = None self._pvalue_name: Optional[str] = None def on(self, key: FieldType) -> "ChangePoint": """Continuation of the `CHANGE_POINT` command. :param key: The column with the key to order the values by. If not specified, `@timestamp` is used. """ self._key = key return self def as_(self, type_name: str, pvalue_name: str) -> "ChangePoint": """Continuation of the `CHANGE_POINT` command. :param type_name: The name of the output column with the change point type. If not specified, `type` is used. :param pvalue_name: The name of the output column with the p-value that indicates how extreme the change point is. If not specified, `pvalue` is used. """ self._type_name = type_name self._pvalue_name = pvalue_name return self def _render_internal(self) -> str: key = "" if not self._key else f" ON {self._format_id(self._key)}" names = ( "" if not self._type_name and not self._pvalue_name else f' AS {self._format_id(self._type_name or "type")}, {self._format_id(self._pvalue_name or "pvalue")}' ) return f"CHANGE_POINT {self._value}{key}{names}" class Completion(ESQLBase): """Implementation of the ``COMPLETION`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. 
""" def __init__( self, parent: ESQLBase, *prompt: ExpressionType, **named_prompt: ExpressionType ): if len(prompt) + len(named_prompt) > 1: raise ValueError( "this method requires either one positional or one keyword argument only" ) super().__init__(parent) self._prompt = prompt self._named_prompt = named_prompt self._inference_id: Optional[str] = None def with_(self, inference_id: str) -> "Completion": """Continuation of the `COMPLETION` command. :param inference_id: The ID of the inference endpoint to use for the task. The inference endpoint must be configured with the completion task type. """ self._inference_id = inference_id return self def _render_internal(self) -> str: if self._inference_id is None: raise ValueError("The completion command requires an inference ID") with_ = {"inference_id": self._inference_id} if self._named_prompt: column = list(self._named_prompt.keys())[0] prompt = list(self._named_prompt.values())[0] return f"COMPLETION {self._format_id(column)} = {self._format_id(prompt)} WITH {json.dumps(with_)}" else: return f"COMPLETION {self._format_id(self._prompt[0])} WITH {json.dumps(with_)}" class Dissect(ESQLBase): """Implementation of the ``DISSECT`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, input: FieldType, pattern: str): super().__init__(parent) self._input = input self._pattern = pattern self._separator: Optional[str] = None def append_separator(self, separator: str) -> "Dissect": """Continuation of the ``DISSECT`` command. :param separator: A string used as the separator between appended values, when using the append modifier. """ self._separator = separator return self def _render_internal(self) -> str: sep = ( "" if self._separator is None else f" APPEND_SEPARATOR={json.dumps(self._separator)}" ) return ( f"DISSECT {self._format_id(self._input)} {json.dumps(self._pattern)}{sep}" ) class Drop(ESQLBase): """Implementation of the ``DROP`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, *columns: FieldType): super().__init__(parent) self._columns = columns def _render_internal(self) -> str: return f'DROP {", ".join([self._format_id(col, allow_patterns=True) for col in self._columns])}' class Enrich(ESQLBase): """Implementation of the ``ENRICH`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, policy: str): super().__init__(parent) self._policy = policy self._match_field: Optional[FieldType] = None self._fields: Optional[Tuple[FieldType, ...]] = None self._named_fields: Optional[Dict[str, FieldType]] = None def on(self, match_field: FieldType) -> "Enrich": """Continuation of the ``ENRICH`` command. :param match_field: The match field. ``ENRICH`` uses its value to look for records in the enrich index. If not specified, the match will be performed on the column with the same name as the `match_field` defined in the enrich policy. """ self._match_field = match_field return self def with_(self, *fields: FieldType, **named_fields: FieldType) -> "Enrich": """Continuation of the ``ENRICH`` command. 
:param fields: The enrich fields from the enrich index that are added to the result as new columns, given as positional arguments. If a column with the same name as the enrich field already exists, the existing column will be replaced by the new column. If not specified, each of the enrich fields defined in the policy is added. A column with the same name as the enrich field will be dropped unless the enrich field is renamed. :param named_fields: The enrich fields from the enrich index that are added to the result as new columns, given as keyword arguments. The name of the keyword arguments are used as column names. If a column has the same name as the new name, it will be discarded. If a name (new or original) occurs more than once, only the rightmost duplicate creates a new column. """ if fields and named_fields: raise ValueError( "this method supports positional or keyword arguments but not both" ) self._fields = fields self._named_fields = named_fields return self def _render_internal(self) -> str: on = ( "" if self._match_field is None else f" ON {self._format_id(self._match_field)}" ) with_ = "" if self._named_fields: with_ = f' WITH {", ".join([f"{self._format_id(name)} = {self._format_id(field)}" for name, field in self._named_fields.items()])}' elif self._fields is not None: with_ = ( f' WITH {", ".join([self._format_id(field) for field in self._fields])}' ) return f"ENRICH {self._policy}{on}{with_}" class Eval(ESQLBase): """Implementation of the ``EVAL`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__( self, parent: ESQLBase, *columns: ExpressionType, **named_columns: ExpressionType, ): if columns and named_columns: raise ValueError( "this method supports positional or keyword arguments but not both" ) super().__init__(parent) self._columns = columns or named_columns def _render_internal(self) -> str: if isinstance(self._columns, dict): cols = ", ".join( [ f"{self._format_id(name)} = {self._format_expr(value)}" for name, value in self._columns.items() ] ) else: cols = ", ".join([f"{self._format_expr(col)}" for col in self._columns]) return f"EVAL {cols}" class Fork(ESQLBase): """Implementation of the ``FORK`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__( self, parent: ESQLBase, fork1: ESQLBase, fork2: Optional[ESQLBase] = None, fork3: Optional[ESQLBase] = None, fork4: Optional[ESQLBase] = None, fork5: Optional[ESQLBase] = None, fork6: Optional[ESQLBase] = None, fork7: Optional[ESQLBase] = None, fork8: Optional[ESQLBase] = None, ): super().__init__(parent) self._branches = [fork1, fork2, fork3, fork4, fork5, fork6, fork7, fork8] def _render_internal(self) -> str: cmds = "" for branch in self._branches: if branch: cmd = branch.render()[3:].replace("\n", " ") if cmds == "": cmds = f"( {cmd} )" else: cmds += f"\n ( {cmd} )" return f"FORK {cmds}" class Grok(ESQLBase): """Implementation of the ``GROK`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. 
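
    A usage sketch, mirroring the ``grok()`` method examples::

        query = (
            ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42")
            .grok("a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}")
            .keep("date", "ip", "email", "num")
        )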
""" def __init__(self, parent: ESQLBase, input: FieldType, pattern: str): super().__init__(parent) self._input = input self._pattern = pattern def _render_internal(self) -> str: return f"GROK {self._format_id(self._input)} {json.dumps(self._pattern)}" class Keep(ESQLBase): """Implementation of the ``KEEP`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, *columns: FieldType): super().__init__(parent) self._columns = columns def _render_internal(self) -> str: return f'KEEP {", ".join([f"{self._format_id(col, allow_patterns=True)}" for col in self._columns])}' class Limit(ESQLBase): """Implementation of the ``LIMIT`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, max_number_of_rows: int): super().__init__(parent) self._max_number_of_rows = max_number_of_rows def _render_internal(self) -> str: return f"LIMIT {json.dumps(self._max_number_of_rows)}" class LookupJoin(ESQLBase): """Implementation of the ``LOOKUP JOIN`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, lookup_index: IndexType): super().__init__(parent) self._lookup_index = lookup_index self._field: Optional[FieldType] = None def on(self, field: FieldType) -> "LookupJoin": """Continuation of the `LOOKUP_JOIN` command. :param field: The field to join on. This field must exist in both your current query results and in the lookup index. If the field contains multi-valued entries, those entries will not match anything (the added fields will contain null for those rows). """ self._field = field return self def _render_internal(self) -> str: if self._field is None: raise ValueError("Joins require a field to join on.") index = ( self._lookup_index if isinstance(self._lookup_index, str) else self._lookup_index._index._name ) return ( f"LOOKUP JOIN {self._format_index(index)} ON {self._format_id(self._field)}" ) class MvExpand(ESQLBase): """Implementation of the ``MV_EXPAND`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, column: FieldType): super().__init__(parent) self._column = column def _render_internal(self) -> str: return f"MV_EXPAND {self._format_id(self._column)}" class Rename(ESQLBase): """Implementation of the ``RENAME`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, **columns: FieldType): super().__init__(parent) self._columns = columns def _render_internal(self) -> str: return f'RENAME {", ".join([f"{self._format_id(old_name)} AS {self._format_id(new_name)}" for old_name, new_name in self._columns.items()])}' class Sample(ESQLBase): """Implementation of the ``SAMPLE`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. 
""" def __init__(self, parent: ESQLBase, probability: float): super().__init__(parent) self._probability = probability def _render_internal(self) -> str: return f"SAMPLE {json.dumps(self._probability)}" class Sort(ESQLBase): """Implementation of the ``SORT`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, *columns: ExpressionType): super().__init__(parent) self._columns = columns def _render_internal(self) -> str: sorts = [ " ".join([self._format_id(term) for term in str(col).split(" ")]) for col in self._columns ] return f'SORT {", ".join([f"{sort}" for sort in sorts])}' class Stats(ESQLBase): """Implementation of the ``STATS`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__( self, parent: ESQLBase, *expressions: ExpressionType, **named_expressions: ExpressionType, ): if expressions and named_expressions: raise ValueError( "this method supports positional or keyword arguments but not both" ) super().__init__(parent) self._expressions = expressions or named_expressions self._grouping_expressions: Optional[Tuple[ExpressionType, ...]] = None def by(self, *grouping_expressions: ExpressionType) -> "Stats": self._grouping_expressions = grouping_expressions return self def _render_internal(self) -> str: if isinstance(self._expressions, dict): exprs = [ f"{self._format_id(key)} = {self._format_expr(value)}" for key, value in self._expressions.items() ] else: exprs = [f"{self._format_expr(expr)}" for expr in self._expressions] expression_separator = ",\n " by = ( "" if self._grouping_expressions is None else f'\n BY {", ".join([f"{self._format_expr(expr)}" for expr in self._grouping_expressions])}' ) return f'STATS {expression_separator.join([f"{expr}" for expr in exprs])}{by}' class Where(ESQLBase): """Implementation of the ``WHERE`` processing command. This class inherits from :class:`ESQLBase `, to make it possible to chain all the commands that belong to an ES|QL query in a single expression. """ def __init__(self, parent: ESQLBase, *expressions: ExpressionType): super().__init__(parent) self._expressions = expressions def _render_internal(self) -> str: return f'WHERE {" AND ".join([f"{self._format_expr(expr)}" for expr in self._expressions])}' def and_(*expressions: InstrumentedExpression) -> "InstrumentedExpression": """Combine two or more expressions with the AND operator.""" return InstrumentedExpression(" AND ".join([f"({expr})" for expr in expressions])) def or_(*expressions: InstrumentedExpression) -> "InstrumentedExpression": """Combine two or more expressions with the OR operator.""" return InstrumentedExpression(" OR ".join([f"({expr})" for expr in expressions])) def not_(expression: InstrumentedExpression) -> "InstrumentedExpression": """Negate an expression.""" return InstrumentedExpression(f"NOT ({expression})") python-elasticsearch-9.1.1/elasticsearch/esql/functions.py000066400000000000000000002040451506101734100240160ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from typing import Any from elasticsearch.dsl.document_base import InstrumentedExpression from elasticsearch.esql.esql import ESQLBase, ExpressionType def _render(v: Any) -> str: return ( json.dumps(v) if not isinstance(v, InstrumentedExpression) else ESQLBase._format_expr(v) ) def abs(number: ExpressionType) -> InstrumentedExpression: """Returns the absolute value. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ABS({_render(number)})") def acos(number: ExpressionType) -> InstrumentedExpression: """Returns the arccosine of `n` as an angle, expressed in radians. :param number: Number between -1 and 1. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ACOS({_render(number)})") def asin(number: ExpressionType) -> InstrumentedExpression: """Returns the arcsine of the input numeric expression as an angle, expressed in radians. :param number: Number between -1 and 1. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ASIN({_render(number)})") def atan(number: ExpressionType) -> InstrumentedExpression: """Returns the arctangent of the input numeric expression as an angle, expressed in radians. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ATAN({_render(number)})") def atan2( y_coordinate: ExpressionType, x_coordinate: ExpressionType ) -> InstrumentedExpression: """The angle between the positive x-axis and the ray from the origin to the point (x , y) in the Cartesian plane, expressed in radians. :param y_coordinate: y coordinate. If `null`, the function returns `null`. :param x_coordinate: x coordinate. If `null`, the function returns `null`. """ return InstrumentedExpression( f"ATAN2({_render(y_coordinate)}, {_render(x_coordinate)})" ) def avg(number: ExpressionType) -> InstrumentedExpression: """The average of a numeric field. :param number: Expression that outputs values to average. """ return InstrumentedExpression(f"AVG({_render(number)})") def avg_over_time(number: ExpressionType) -> InstrumentedExpression: """The average over time of a numeric field. :param number: Expression that outputs values to average. """ return InstrumentedExpression(f"AVG_OVER_TIME({_render(number)})") def bit_length(string: ExpressionType) -> InstrumentedExpression: """Returns the bit length of a string. :param string: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"BIT_LENGTH({_render(string)})") def bucket( field: ExpressionType, buckets: ExpressionType, from_: ExpressionType, to: ExpressionType, ) -> InstrumentedExpression: """Creates groups of values - buckets - out of a datetime or numeric input. The size of the buckets can either be provided directly, or chosen based on a recommended count and values range. :param field: Numeric or date expression from which to derive buckets. 
:param buckets: Target number of buckets, or desired bucket size if `from` and `to` parameters are omitted. :param from_: Start of the range. Can be a number, a date or a date expressed as a string. :param to: End of the range. Can be a number, a date or a date expressed as a string. """ return InstrumentedExpression( f"BUCKET({_render(field)}, {_render(buckets)}, {_render(from_)}, {_render(to)})" ) def byte_length(string: ExpressionType) -> InstrumentedExpression: """Returns the byte length of a string. :param string: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"BYTE_LENGTH({_render(string)})") def case(*conditions: ExpressionType) -> InstrumentedExpression: """Accepts pairs of conditions and values. The function returns the value that belongs to the first condition that evaluates to `true`. If the number of arguments is odd, the last argument is the default value which is returned when no condition matches. If the number of arguments is even, and no condition matches, the function returns `null`. """ return InstrumentedExpression( f'CASE({", ".join([_render(c) for c in conditions])})' ) def categorize(field: ExpressionType) -> InstrumentedExpression: """Groups text messages into categories of similarly formatted text values. :param field: Expression to categorize """ return InstrumentedExpression(f"CATEGORIZE({_render(field)})") def cbrt(number: ExpressionType) -> InstrumentedExpression: """Returns the cube root of a number. The input can be any numeric value, the return value is always a double. Cube roots of infinities are null. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"CBRT({_render(number)})") def ceil(number: ExpressionType) -> InstrumentedExpression: """Round a number up to the nearest integer. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"CEIL({_render(number)})") def cidr_match(ip: ExpressionType, block_x: ExpressionType) -> InstrumentedExpression: """Returns true if the provided IP is contained in one of the provided CIDR blocks. :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported). :param block_x: CIDR block to test the IP against. """ return InstrumentedExpression(f"CIDR_MATCH({_render(ip)}, {_render(block_x)})") def coalesce(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression: """Returns the first of its arguments that is not null. If all arguments are null, it returns `null`. :param first: Expression to evaluate. :param rest: Other expression to evaluate. """ return InstrumentedExpression(f"COALESCE({_render(first)}, {_render(rest)})") def concat(*strings: ExpressionType) -> InstrumentedExpression: """Concatenates two or more strings.""" return InstrumentedExpression( f'CONCAT({", ".join([f"{_render(s)}" for s in strings])})' ) def cos(angle: ExpressionType) -> InstrumentedExpression: """Returns the cosine of an angle. :param angle: An angle, in radians. If `null`, the function returns `null`. """ return InstrumentedExpression(f"COS({_render(angle)})") def cosh(number: ExpressionType) -> InstrumentedExpression: """Returns the hyperbolic cosine of a number. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"COSH({_render(number)})") def count(field: ExpressionType) -> InstrumentedExpression: """Returns the total number (count) of input values. 
:param field: Expression that outputs values to be counted. If omitted, equivalent to `COUNT(*)` (the number of rows). """ return InstrumentedExpression(f"COUNT({_render(field)})") def count_distinct( field: ExpressionType, precision: ExpressionType ) -> InstrumentedExpression: """Returns the approximate number of distinct values. :param field: Column or literal for which to count the number of distinct values. :param precision: Precision threshold. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000. """ return InstrumentedExpression( f"COUNT_DISTINCT({_render(field)}, {_render(precision)})" ) def count_distinct_over_time( field: ExpressionType, precision: ExpressionType ) -> InstrumentedExpression: """The count of distinct values over time for a field. :param field: :param precision: Precision threshold. The maximum supported value is 40000. Thresholds above this number will have the same effect as a threshold of 40000. The default value is 3000. """ return InstrumentedExpression( f"COUNT_DISTINCT_OVER_TIME({_render(field)}, {_render(precision)})" ) def count_over_time(field: ExpressionType) -> InstrumentedExpression: """The count over time value of a field. :param field: """ return InstrumentedExpression(f"COUNT_OVER_TIME({_render(field)})") def date_diff( unit: ExpressionType, start_timestamp: ExpressionType, end_timestamp: ExpressionType ) -> InstrumentedExpression: """Subtracts the `startTimestamp` from the `endTimestamp` and returns the difference in multiples of `unit`. If `startTimestamp` is later than the `endTimestamp`, negative values are returned. :param unit: Time difference unit :param start_timestamp: A string representing a start timestamp :param end_timestamp: A string representing an end timestamp """ return InstrumentedExpression( f"DATE_DIFF({_render(unit)}, {_render(start_timestamp)}, {_render(end_timestamp)})" ) def date_extract( date_part: ExpressionType, date: ExpressionType ) -> InstrumentedExpression: """Extracts parts of a date, like year, month, day, hour. :param date_part: Part of the date to extract. Can be: `aligned_day_of_week_in_month`, `aligned_day_of_week_in_year`, `aligned_week_of_month`, `aligned_week_of_year`, `ampm_of_day`, `clock_hour_of_ampm`, `clock_hour_of_day`, `day_of_month`, `day_of_week`, `day_of_year`, `epoch_day`, `era`, `hour_of_ampm`, `hour_of_day`, `instant_seconds`, `micro_of_day`, `micro_of_second`, `milli_of_day`, `milli_of_second`, `minute_of_day`, `minute_of_hour`, `month_of_year`, `nano_of_day`, `nano_of_second`, `offset_seconds`, `proleptic_month`, `second_of_day`, `second_of_minute`, `year`, or `year_of_era`. If `null`, the function returns `null`. :param date: Date expression. If `null`, the function returns `null`. """ return InstrumentedExpression( f"DATE_EXTRACT({_render(date_part)}, {_render(date)})" ) def date_format( date: ExpressionType, date_format: ExpressionType = None, ) -> InstrumentedExpression: """Returns a string representation of a date, in the provided format. :param date_format: Date format (optional). If no format is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns `null`. :param date: Date expression. If `null`, the function returns `null`. 
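
    A usage sketch (the field name and format are illustrative)::

        hired_year = date_format("hire_date", "yyyy")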
""" if date_format is not None: return InstrumentedExpression( f"DATE_FORMAT({_render(date_format)}, {_render(date)})" ) else: return InstrumentedExpression(f"DATE_FORMAT({_render(date)})") def date_parse( date_pattern: ExpressionType, date_string: ExpressionType ) -> InstrumentedExpression: """Returns a date by parsing the second argument using the format specified in the first argument. :param date_pattern: The date format. If `null`, the function returns `null`. :param date_string: Date expression as a string. If `null` or an empty string, the function returns `null`. """ return InstrumentedExpression( f"DATE_PARSE({_render(date_pattern)}, {_render(date_string)})" ) def date_trunc( interval: ExpressionType, date: ExpressionType ) -> InstrumentedExpression: """Rounds down a date to the closest interval since epoch, which starts at `0001-01-01T00:00:00Z`. :param interval: Interval; expressed using the timespan literal syntax. :param date: Date expression """ return InstrumentedExpression(f"DATE_TRUNC({_render(interval)}, {_render(date)})") def e() -> InstrumentedExpression: """Returns Euler’s number).""" return InstrumentedExpression("E()") def ends_with(str: ExpressionType, suffix: ExpressionType) -> InstrumentedExpression: """Returns a boolean that indicates whether a keyword string ends with another string. :param str: String expression. If `null`, the function returns `null`. :param suffix: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ENDS_WITH({_render(str)}, {_render(suffix)})") def exp(number: ExpressionType) -> InstrumentedExpression: """Returns the value of e raised to the power of the given number. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"EXP({_render(number)})") def first_over_time(field: ExpressionType) -> InstrumentedExpression: """The earliest value of a field, where recency determined by the `@timestamp` field. :param field: """ return InstrumentedExpression(f"FIRST_OVER_TIME({_render(field)})") def floor(number: ExpressionType) -> InstrumentedExpression: """Round a number down to the nearest integer. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"FLOOR({_render(number)})") def from_base64(string: ExpressionType) -> InstrumentedExpression: """Decode a base64 string. :param string: A base64 string. """ return InstrumentedExpression(f"FROM_BASE64({_render(string)})") def greatest(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression: """Returns the maximum value from multiple columns. This is similar to `MV_MAX` except it is intended to run on multiple columns at once. :param first: First of the columns to evaluate. :param rest: The rest of the columns to evaluate. """ return InstrumentedExpression(f"GREATEST({_render(first)}, {_render(rest)})") def hash(algorithm: ExpressionType, input: ExpressionType) -> InstrumentedExpression: """Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512. :param algorithm: Hash algorithm to use. :param input: Input to hash. """ return InstrumentedExpression(f"HASH({_render(algorithm)}, {_render(input)})") def hypot(number1: ExpressionType, number2: ExpressionType) -> InstrumentedExpression: """Returns the hypotenuse of two numbers. The input can be any numeric values, the return value is always a double. Hypotenuses of infinities are null. :param number1: Numeric expression. 
        If `null`, the function returns `null`.
    :param number2: Numeric expression.
        If `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"HYPOT({number1}, {number2})")


def ip_prefix(
    ip: ExpressionType,
    prefix_length_v4: ExpressionType,
    prefix_length_v6: ExpressionType,
) -> InstrumentedExpression:
    """Truncates an IP to a given prefix length.

    :param ip: IP address of type `ip` (both IPv4 and IPv6 are supported).
    :param prefix_length_v4: Prefix length for IPv4 addresses.
    :param prefix_length_v6: Prefix length for IPv6 addresses.
    """
    return InstrumentedExpression(
        f"IP_PREFIX({_render(ip)}, {prefix_length_v4}, {prefix_length_v6})"
    )


def knn(
    field: ExpressionType, query: ExpressionType, options: ExpressionType = None
) -> InstrumentedExpression:
    """Finds the k nearest vectors to a query vector, as measured by a similarity
    metric. The knn function finds nearest vectors through approximate search on
    indexed dense_vectors.

    :param field: Field that the query will target.
    :param query: Vector value to find top nearest neighbours for.
    :param options: (Optional) kNN additional options as function named parameters.
    """
    if options is not None:
        return InstrumentedExpression(
            f"KNN({_render(field)}, {_render(query)}, {_render(options)})"
        )
    else:
        return InstrumentedExpression(f"KNN({_render(field)}, {_render(query)})")


def kql(query: ExpressionType) -> InstrumentedExpression:
    """Performs a KQL query. Returns true if the provided KQL query string matches
    the row.

    :param query: Query string in KQL query string format.
    """
    return InstrumentedExpression(f"KQL({_render(query)})")


def last_over_time(field: ExpressionType) -> InstrumentedExpression:
    """The latest value of a field, where recency is determined by the
    `@timestamp` field.

    :param field:
    """
    return InstrumentedExpression(f"LAST_OVER_TIME({_render(field)})")


def least(first: ExpressionType, rest: ExpressionType) -> InstrumentedExpression:
    """Returns the minimum value from multiple columns. This is similar to `MV_MIN`
    except it is intended to run on multiple columns at once.

    :param first: First of the columns to evaluate.
    :param rest: The rest of the columns to evaluate.
    """
    return InstrumentedExpression(f"LEAST({_render(first)}, {_render(rest)})")


def left(string: ExpressionType, length: ExpressionType) -> InstrumentedExpression:
    """Returns the substring that extracts *length* chars from *string* starting
    from the left.

    :param string: The string from which to return a substring.
    :param length: The number of characters to return.
    """
    return InstrumentedExpression(f"LEFT({_render(string)}, {_render(length)})")


def length(string: ExpressionType) -> InstrumentedExpression:
    """Returns the character length of a string.

    :param string: String expression. If `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"LENGTH({_render(string)})")


def locate(
    string: ExpressionType, substring: ExpressionType, start: ExpressionType
) -> InstrumentedExpression:
    """Returns an integer that indicates the position of a keyword substring within
    another string. Returns `0` if the substring cannot be found. Note that string
    positions start from `1`.

    :param string: An input string
    :param substring: A substring to locate in the input string
    :param start: The start index
    """
    return InstrumentedExpression(
        f"LOCATE({_render(string)}, {_render(substring)}, {_render(start)})"
    )


def log(base: ExpressionType, number: ExpressionType) -> InstrumentedExpression:
    """Returns the logarithm of a value to a base.
The input can be any numeric value, the return value is always a double. Logs of zero, negative numbers, and base of one return `null` as well as a warning. :param base: Base of logarithm. If `null`, the function returns `null`. If not provided, this function returns the natural logarithm (base e) of a value. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"LOG({_render(base)}, {_render(number)})") def log10(number: ExpressionType) -> InstrumentedExpression: """Returns the logarithm of a value to base 10. The input can be any numeric value, the return value is always a double. Logs of 0 and negative numbers return `null` as well as a warning. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"LOG10({_render(number)})") def ltrim(string: ExpressionType) -> InstrumentedExpression: """Removes leading whitespaces from a string. :param string: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"LTRIM({_render(string)})") def match( field: ExpressionType, query: ExpressionType, options: ExpressionType = None ) -> InstrumentedExpression: """Use `MATCH` to perform a match query on the specified field. Using `MATCH` is equivalent to using the `match` query in the Elasticsearch Query DSL. :param field: Field that the query will target. :param query: Value to find in the provided field. :param options: (Optional) Match additional options as function named parameters. """ if options is not None: return InstrumentedExpression( f"MATCH({_render(field)}, {_render(query)}, {_render(options)})" ) else: return InstrumentedExpression(f"MATCH({_render(field)}, {_render(query)})") def match_phrase( field: ExpressionType, query: ExpressionType, options: ExpressionType = None ) -> InstrumentedExpression: """Use `MATCH_PHRASE` to perform a `match_phrase` on the specified field. Using `MATCH_PHRASE` is equivalent to using the `match_phrase` query in the Elasticsearch Query DSL. :param field: Field that the query will target. :param query: Value to find in the provided field. :param options: (Optional) MatchPhrase additional options as function named parameters. """ if options is not None: return InstrumentedExpression( f"MATCH_PHRASE({_render(field)}, {_render(query)}, {_render(options)})" ) else: return InstrumentedExpression( f"MATCH_PHRASE({_render(field)}, {_render(query)})" ) def max(field: ExpressionType) -> InstrumentedExpression: """The maximum value of a field. :param field: """ return InstrumentedExpression(f"MAX({_render(field)})") def max_over_time(field: ExpressionType) -> InstrumentedExpression: """The maximum over time value of a field. :param field: """ return InstrumentedExpression(f"MAX_OVER_TIME({_render(field)})") def md5(input: ExpressionType) -> InstrumentedExpression: """Computes the MD5 hash of the input. :param input: Input to hash. """ return InstrumentedExpression(f"MD5({_render(input)})") def median(number: ExpressionType) -> InstrumentedExpression: """The value that is greater than half of all values and less than half of all values, also known as the 50% `PERCENTILE`. :param number: Expression that outputs values to calculate the median of. """ return InstrumentedExpression(f"MEDIAN({_render(number)})") def median_absolute_deviation(number: ExpressionType) -> InstrumentedExpression: """Returns the median absolute deviation, a measure of variability. 
It is a robust statistic, meaning that it is useful for describing data that may have outliers, or may not be normally distributed. For such data it can be more descriptive than standard deviation. It is calculated as the median of each data point’s deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`. :param number: """ return InstrumentedExpression(f"MEDIAN_ABSOLUTE_DEVIATION({_render(number)})") def min(field: ExpressionType) -> InstrumentedExpression: """The minimum value of a field. :param field: """ return InstrumentedExpression(f"MIN({_render(field)})") def min_over_time(field: ExpressionType) -> InstrumentedExpression: """The minimum over time value of a field. :param field: """ return InstrumentedExpression(f"MIN_OVER_TIME({_render(field)})") def multi_match( query: ExpressionType, *fields: ExpressionType, options: ExpressionType = None ) -> InstrumentedExpression: """Use `MULTI_MATCH` to perform a multi-match query on the specified field. The multi_match query builds on the match query to allow multi-field queries. :param query: Value to find in the provided fields. :param fields: Fields to use for matching :param options: (Optional) Additional options for MultiMatch, passed as function named parameters """ if options is not None: return InstrumentedExpression( f'MULTI_MATCH({_render(query)}, {", ".join([_render(c) for c in fields])}, {_render(options)})' ) else: return InstrumentedExpression( f'MULTI_MATCH({_render(query)}, {", ".join([_render(c) for c in fields])})' ) def mv_append(field1: ExpressionType, field2: ExpressionType) -> InstrumentedExpression: """Concatenates values of two multi-value fields. :param field1: :param field2: """ return InstrumentedExpression(f"MV_APPEND({field1}, {field2})") def mv_avg(number: ExpressionType) -> InstrumentedExpression: """Converts a multivalued field into a single valued field containing the average of all of the values. :param number: Multivalue expression. """ return InstrumentedExpression(f"MV_AVG({_render(number)})") def mv_concat(string: ExpressionType, delim: ExpressionType) -> InstrumentedExpression: """Converts a multivalued string expression into a single valued column containing the concatenation of all values separated by a delimiter. :param string: Multivalue expression. :param delim: Delimiter. """ return InstrumentedExpression(f"MV_CONCAT({_render(string)}, {_render(delim)})") def mv_count(field: ExpressionType) -> InstrumentedExpression: """Converts a multivalued expression into a single valued column containing a count of the number of values. :param field: Multivalue expression. """ return InstrumentedExpression(f"MV_COUNT({_render(field)})") def mv_dedupe(field: ExpressionType) -> InstrumentedExpression: """Remove duplicate values from a multivalued field. :param field: Multivalue expression. """ return InstrumentedExpression(f"MV_DEDUPE({_render(field)})") def mv_first(field: ExpressionType) -> InstrumentedExpression: """Converts a multivalued expression into a single valued column containing the first value. This is most useful when reading from a function that emits multivalued columns in a known order like `SPLIT`. :param field: Multivalue expression. """ return InstrumentedExpression(f"MV_FIRST({_render(field)})") def mv_last(field: ExpressionType) -> InstrumentedExpression: """Converts a multivalue expression into a single valued column containing the last value. 
This is most useful when reading from a function that emits multivalued columns in a known order like `SPLIT`. :param field: Multivalue expression. """ return InstrumentedExpression(f"MV_LAST({_render(field)})") def mv_max(field: ExpressionType) -> InstrumentedExpression: """Converts a multivalued expression into a single valued column containing the maximum value. :param field: Multivalue expression. """ return InstrumentedExpression(f"MV_MAX({_render(field)})") def mv_median(number: ExpressionType) -> InstrumentedExpression: """Converts a multivalued field into a single valued field containing the median value. :param number: Multivalue expression. """ return InstrumentedExpression(f"MV_MEDIAN({_render(number)})") def mv_median_absolute_deviation(number: ExpressionType) -> InstrumentedExpression: """Converts a multivalued field into a single valued field containing the median absolute deviation. It is calculated as the median of each data point’s deviation from the median of the entire sample. That is, for a random variable `X`, the median absolute deviation is `median(|median(X) - X|)`. :param number: Multivalue expression. """ return InstrumentedExpression(f"MV_MEDIAN_ABSOLUTE_DEVIATION({_render(number)})") def mv_min(field: ExpressionType) -> InstrumentedExpression: """Converts a multivalued expression into a single valued column containing the minimum value. :param field: Multivalue expression. """ return InstrumentedExpression(f"MV_MIN({_render(field)})") def mv_percentile( number: ExpressionType, percentile: ExpressionType ) -> InstrumentedExpression: """Converts a multivalued field into a single valued field containing the value at which a certain percentage of observed values occur. :param number: Multivalue expression. :param percentile: The percentile to calculate. Must be a number between 0 and 100. Numbers out of range will return a null instead. """ return InstrumentedExpression( f"MV_PERCENTILE({_render(number)}, {_render(percentile)})" ) def mv_pseries_weighted_sum( number: ExpressionType, p: ExpressionType ) -> InstrumentedExpression: """Converts a multivalued expression into a single-valued column by multiplying every element on the input list by its corresponding term in P-Series and computing the sum. :param number: Multivalue expression. :param p: It is a constant number that represents the *p* parameter in the P-Series. It impacts every element’s contribution to the weighted sum. """ return InstrumentedExpression( f"MV_PSERIES_WEIGHTED_SUM({_render(number)}, {_render(p)})" ) def mv_slice( field: ExpressionType, start: ExpressionType, end: ExpressionType = None ) -> InstrumentedExpression: """Returns a subset of the multivalued field using the start and end index values. This is most useful when reading from a function that emits multivalued columns in a known order like `SPLIT` or `MV_SORT`. :param field: Multivalue expression. If `null`, the function returns `null`. :param start: Start position. If `null`, the function returns `null`. The start argument can be negative. An index of -1 is used to specify the last value in the list. :param end: End position(included). Optional; if omitted, the position at `start` is returned. The end argument can be negative. An index of -1 is used to specify the last value in the list. 
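
    A usage sketch (the column name is illustrative); ``end`` is inclusive, so
    this keeps the first two values::

        first_two = mv_slice("a", 0, 1)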
""" if end is not None: return InstrumentedExpression( f"MV_SLICE({_render(field)}, {_render(start)}, {_render(end)})" ) else: return InstrumentedExpression(f"MV_SLICE({_render(field)}, {_render(start)})") def mv_sort(field: ExpressionType, order: ExpressionType) -> InstrumentedExpression: """Sorts a multivalued field in lexicographical order. :param field: Multivalue expression. If `null`, the function returns `null`. :param order: Sort order. The valid options are ASC and DESC, the default is ASC. """ return InstrumentedExpression(f"MV_SORT({_render(field)}, {_render(order)})") def mv_sum(number: ExpressionType) -> InstrumentedExpression: """Converts a multivalued field into a single valued field containing the sum of all of the values. :param number: Multivalue expression. """ return InstrumentedExpression(f"MV_SUM({_render(number)})") def mv_zip( string1: ExpressionType, string2: ExpressionType, delim: ExpressionType = None ) -> InstrumentedExpression: """Combines the values from two multivalued fields with a delimiter that joins them together. :param string1: Multivalue expression. :param string2: Multivalue expression. :param delim: Delimiter. Optional; if omitted, `,` is used as a default delimiter. """ if delim is not None: return InstrumentedExpression(f"MV_ZIP({string1}, {string2}, {_render(delim)})") else: return InstrumentedExpression(f"MV_ZIP({string1}, {string2})") def now() -> InstrumentedExpression: """Returns current date and time.""" return InstrumentedExpression("NOW()") def percentile( number: ExpressionType, percentile: ExpressionType ) -> InstrumentedExpression: """Returns the value at which a certain percentage of observed values occur. For example, the 95th percentile is the value which is greater than 95% of the observed values and the 50th percentile is the `MEDIAN`. :param number: :param percentile: """ return InstrumentedExpression( f"PERCENTILE({_render(number)}, {_render(percentile)})" ) def pi() -> InstrumentedExpression: """Returns Pi, the ratio of a circle’s circumference to its diameter.""" return InstrumentedExpression("PI()") def pow(base: ExpressionType, exponent: ExpressionType) -> InstrumentedExpression: """Returns the value of `base` raised to the power of `exponent`. :param base: Numeric expression for the base. If `null`, the function returns `null`. :param exponent: Numeric expression for the exponent. If `null`, the function returns `null`. """ return InstrumentedExpression(f"POW({_render(base)}, {_render(exponent)})") def qstr( query: ExpressionType, options: ExpressionType = None ) -> InstrumentedExpression: """Performs a query string query. Returns true if the provided query string matches the row. :param query: Query string in Lucene query string format. :param options: (Optional) Additional options for Query String as function named parameters. """ if options is not None: return InstrumentedExpression(f"QSTR({_render(query)}, {_render(options)})") else: return InstrumentedExpression(f"QSTR({_render(query)})") def rate(field: ExpressionType) -> InstrumentedExpression: """The rate of a counter field. :param field: """ return InstrumentedExpression(f"RATE({_render(field)})") def repeat(string: ExpressionType, number: ExpressionType) -> InstrumentedExpression: """Returns a string constructed by concatenating `string` with itself the specified `number` of times. :param string: String expression. :param number: Number times to repeat. 
""" return InstrumentedExpression(f"REPEAT({_render(string)}, {_render(number)})") def replace( string: ExpressionType, regex: ExpressionType, new_string: ExpressionType ) -> InstrumentedExpression: """The function substitutes in the string `str` any match of the regular expression `regex` with the replacement string `newStr`. :param string: String expression. :param regex: Regular expression. :param new_string: Replacement string. """ return InstrumentedExpression( f"REPLACE({_render(string)}, {_render(regex)}, {_render(new_string)})" ) def reverse(str: ExpressionType) -> InstrumentedExpression: """Returns a new string representing the input string in reverse order. :param str: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"REVERSE({_render(str)})") def right(string: ExpressionType, length: ExpressionType) -> InstrumentedExpression: """Return the substring that extracts *length* chars from *str* starting from the right. :param string: The string from which to returns a substring. :param length: The number of characters to return. """ return InstrumentedExpression(f"RIGHT({_render(string)}, {_render(length)})") def round( number: ExpressionType, decimals: ExpressionType = None ) -> InstrumentedExpression: """Rounds a number to the specified number of decimal places. Defaults to 0, which returns the nearest integer. If the precision is a negative number, rounds to the number of digits left of the decimal point. :param number: The numeric value to round. If `null`, the function returns `null`. :param decimals: The number of decimal places to round to. Defaults to 0. If `null`, the function returns `null`. """ if decimals is not None: return InstrumentedExpression(f"ROUND({_render(number)}, {_render(decimals)})") else: return InstrumentedExpression(f"ROUND({_render(number)})") def round_to(field: ExpressionType, points: ExpressionType) -> InstrumentedExpression: """Rounds down to one of a list of fixed points. :param field: The numeric value to round. If `null`, the function returns `null`. :param points: Remaining rounding points. Must be constants. """ return InstrumentedExpression(f"ROUND_TO({_render(field)}, {_render(points)})") def rtrim(string: ExpressionType) -> InstrumentedExpression: """Removes trailing whitespaces from a string. :param string: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"RTRIM({_render(string)})") def sample(field: ExpressionType, limit: ExpressionType) -> InstrumentedExpression: """Collects sample values for a field. :param field: The field to collect sample values for. :param limit: The maximum number of values to collect. """ return InstrumentedExpression(f"SAMPLE({_render(field)}, {_render(limit)})") def scalb(d: ExpressionType, scale_factor: ExpressionType) -> InstrumentedExpression: """Returns the result of `d * 2 ^ scaleFactor`, Similar to Java's `scalb` function. Result is rounded as if performed by a single correctly rounded floating-point multiply to a member of the double value set. :param d: Numeric expression for the multiplier. If `null`, the function returns `null`. :param scale_factor: Numeric expression for the scale factor. If `null`, the function returns `null`. """ return InstrumentedExpression(f"SCALB({_render(d)}, {_render(scale_factor)})") def sha1(input: ExpressionType) -> InstrumentedExpression: """Computes the SHA1 hash of the input. :param input: Input to hash. 
""" return InstrumentedExpression(f"SHA1({_render(input)})") def sha256(input: ExpressionType) -> InstrumentedExpression: """Computes the SHA256 hash of the input. :param input: Input to hash. """ return InstrumentedExpression(f"SHA256({_render(input)})") def signum(number: ExpressionType) -> InstrumentedExpression: """Returns the sign of the given number. It returns `-1` for negative numbers, `0` for `0` and `1` for positive numbers. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"SIGNUM({_render(number)})") def sin(angle: ExpressionType) -> InstrumentedExpression: """Returns the sine of an angle. :param angle: An angle, in radians. If `null`, the function returns `null`. """ return InstrumentedExpression(f"SIN({_render(angle)})") def sinh(number: ExpressionType) -> InstrumentedExpression: """Returns the hyperbolic sine of a number. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"SINH({_render(number)})") def space(number: ExpressionType) -> InstrumentedExpression: """Returns a string made of `number` spaces. :param number: Number of spaces in result. """ return InstrumentedExpression(f"SPACE({_render(number)})") def split(string: ExpressionType, delim: ExpressionType) -> InstrumentedExpression: """Split a single valued string into multiple strings. :param string: String expression. If `null`, the function returns `null`. :param delim: Delimiter. Only single byte delimiters are currently supported. """ return InstrumentedExpression(f"SPLIT({_render(string)}, {_render(delim)})") def sqrt(number: ExpressionType) -> InstrumentedExpression: """Returns the square root of a number. The input can be any numeric value, the return value is always a double. Square roots of negative numbers and infinities are null. :param number: Numeric expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"SQRT({_render(number)})") def starts_with(str: ExpressionType, prefix: ExpressionType) -> InstrumentedExpression: """Returns a boolean that indicates whether a keyword string starts with another string. :param str: String expression. If `null`, the function returns `null`. :param prefix: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"STARTS_WITH({_render(str)}, {_render(prefix)})") def std_dev(number: ExpressionType) -> InstrumentedExpression: """The population standard deviation of a numeric field. :param number: """ return InstrumentedExpression(f"STD_DEV({_render(number)})") def st_centroid_agg(field: ExpressionType) -> InstrumentedExpression: """Calculate the spatial centroid over a field with spatial point geometry type. :param field: """ return InstrumentedExpression(f"ST_CENTROID_AGG({_render(field)})") def st_contains( geom_a: ExpressionType, geom_b: ExpressionType ) -> InstrumentedExpression: """Returns whether the first geometry contains the second geometry. This is the inverse of the ST_WITHIN function. :param geom_a: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. 
""" return InstrumentedExpression(f"ST_CONTAINS({_render(geom_a)}, {_render(geom_b)})") def st_disjoint( geom_a: ExpressionType, geom_b: ExpressionType ) -> InstrumentedExpression: """Returns whether the two geometries or geometry columns are disjoint. This is the inverse of the ST_INTERSECTS function. In mathematical terms: ST_Disjoint(A, B) ⇔ A ⋂ B = ∅ :param geom_a: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. """ return InstrumentedExpression(f"ST_DISJOINT({_render(geom_a)}, {_render(geom_b)})") def st_distance( geom_a: ExpressionType, geom_b: ExpressionType ) -> InstrumentedExpression: """Computes the distance between two points. For cartesian geometries, this is the pythagorean distance in the same units as the original coordinates. For geographic geometries, this is the circular distance along the great circle in meters. :param geom_a: Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. :param geom_b: Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_point` and `cartesian_point` parameters. """ return InstrumentedExpression(f"ST_DISTANCE({_render(geom_a)}, {_render(geom_b)})") def st_envelope(geometry: ExpressionType) -> InstrumentedExpression: """Determines the minimum bounding box of the supplied geometry. :param geometry: Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ST_ENVELOPE({_render(geometry)})") def st_extent_agg(field: ExpressionType) -> InstrumentedExpression: """Calculate the spatial extent over a field with geometry type. Returns a bounding box for all values of the field. :param field: """ return InstrumentedExpression(f"ST_EXTENT_AGG({_render(field)})") def st_geohash( geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None ) -> InstrumentedExpression: """Calculates the `geohash` of the supplied geo_point at the specified precision. The result is long encoded. Use ST_GEOHASH_TO_STRING to convert the result to a string. These functions are related to the `geo_grid` query and the `geohash_grid` aggregation. :param geometry: Expression of type `geo_point`. If `null`, the function returns `null`. :param precision: Expression of type `integer`. If `null`, the function returns `null`. Valid values are between 1 and 12. :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any other type. """ if bounds is not None: return InstrumentedExpression( f"ST_GEOHASH({_render(geometry)}, {_render(precision)}, {_render(bounds)})" ) else: return InstrumentedExpression( f"ST_GEOHASH({_render(geometry)}, {_render(precision)})" ) def st_geohash_to_long(grid_id: ExpressionType) -> InstrumentedExpression: """Converts an input value representing a geohash grid-ID in string format into a long. :param grid_id: Input geohash grid-id. The input can be a single- or multi-valued column or an expression. 
""" return InstrumentedExpression(f"ST_GEOHASH_TO_LONG({_render(grid_id)})") def st_geohash_to_string(grid_id: ExpressionType) -> InstrumentedExpression: """Converts an input value representing a geohash grid-ID in long format into a string. :param grid_id: Input geohash grid-id. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"ST_GEOHASH_TO_STRING({_render(grid_id)})") def st_geohex( geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None ) -> InstrumentedExpression: """Calculates the `geohex`, the H3 cell-id, of the supplied geo_point at the specified precision. The result is long encoded. Use ST_GEOHEX_TO_STRING to convert the result to a string. These functions are related to the `geo_grid` query and the `geohex_grid` aggregation. :param geometry: Expression of type `geo_point`. If `null`, the function returns `null`. :param precision: Expression of type `integer`. If `null`, the function returns `null`. Valid values are between 0 and 15. :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any other type. """ if bounds is not None: return InstrumentedExpression( f"ST_GEOHEX({_render(geometry)}, {_render(precision)}, {_render(bounds)})" ) else: return InstrumentedExpression( f"ST_GEOHEX({_render(geometry)}, {_render(precision)})" ) def st_geohex_to_long(grid_id: ExpressionType) -> InstrumentedExpression: """Converts an input value representing a geohex grid-ID in string format into a long. :param grid_id: Input geohex grid-id. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"ST_GEOHEX_TO_LONG({_render(grid_id)})") def st_geohex_to_string(grid_id: ExpressionType) -> InstrumentedExpression: """Converts an input value representing a Geohex grid-ID in long format into a string. :param grid_id: Input Geohex grid-id. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"ST_GEOHEX_TO_STRING({_render(grid_id)})") def st_geotile( geometry: ExpressionType, precision: ExpressionType, bounds: ExpressionType = None ) -> InstrumentedExpression: """Calculates the `geotile` of the supplied geo_point at the specified precision. The result is long encoded. Use ST_GEOTILE_TO_STRING to convert the result to a string. These functions are related to the `geo_grid` query and the `geotile_grid` aggregation. :param geometry: Expression of type `geo_point`. If `null`, the function returns `null`. :param precision: Expression of type `integer`. If `null`, the function returns `null`. Valid values are between 0 and 29. :param bounds: Optional bounds to filter the grid tiles, a `geo_shape` of type `BBOX`. Use `ST_ENVELOPE` if the `geo_shape` is of any other type. """ if bounds is not None: return InstrumentedExpression( f"ST_GEOTILE({_render(geometry)}, {_render(precision)}, {_render(bounds)})" ) else: return InstrumentedExpression( f"ST_GEOTILE({_render(geometry)}, {_render(precision)})" ) def st_geotile_to_long(grid_id: ExpressionType) -> InstrumentedExpression: """Converts an input value representing a geotile grid-ID in string format into a long. :param grid_id: Input geotile grid-id. The input can be a single- or multi-valued column or an expression. 
""" return InstrumentedExpression(f"ST_GEOTILE_TO_LONG({_render(grid_id)})") def st_geotile_to_string(grid_id: ExpressionType) -> InstrumentedExpression: """Converts an input value representing a geotile grid-ID in long format into a string. :param grid_id: Input geotile grid-id. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"ST_GEOTILE_TO_STRING({_render(grid_id)})") def st_intersects( geom_a: ExpressionType, geom_b: ExpressionType ) -> InstrumentedExpression: """Returns true if two geometries intersect. They intersect if they have any point in common, including their interior points (points along lines or within polygons). This is the inverse of the ST_DISJOINT function. In mathematical terms: ST_Intersects(A, B) ⇔ A ⋂ B ≠ ∅ :param geom_a: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. """ return InstrumentedExpression( f"ST_INTERSECTS({_render(geom_a)}, {_render(geom_b)})" ) def st_within(geom_a: ExpressionType, geom_b: ExpressionType) -> InstrumentedExpression: """Returns whether the first geometry is within the second geometry. This is the inverse of the ST_CONTAINS function. :param geom_a: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. :param geom_b: Expression of type `geo_point`, `cartesian_point`, `geo_shape` or `cartesian_shape`. If `null`, the function returns `null`. The second parameter must also have the same coordinate system as the first. This means it is not possible to combine `geo_*` and `cartesian_*` parameters. """ return InstrumentedExpression(f"ST_WITHIN({_render(geom_a)}, {_render(geom_b)})") def st_x(point: ExpressionType) -> InstrumentedExpression: """Extracts the `x` coordinate from the supplied point. If the points is of type `geo_point` this is equivalent to extracting the `longitude` value. :param point: Expression of type `geo_point` or `cartesian_point`. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ST_X({_render(point)})") def st_xmax(point: ExpressionType) -> InstrumentedExpression: """Extracts the maximum value of the `x` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the maximum `longitude` value. :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ST_XMAX({_render(point)})") def st_xmin(point: ExpressionType) -> InstrumentedExpression: """Extracts the minimum value of the `x` coordinates from the supplied geometry. If the geometry is of type `geo_point` or `geo_shape` this is equivalent to extracting the minimum `longitude` value. :param point: Expression of type `geo_point`, `geo_shape`, `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`. """ return InstrumentedExpression(f"ST_XMIN({_render(point)})") def st_y(point: ExpressionType) -> InstrumentedExpression: """Extracts the `y` coordinate from the supplied point. 
    If the point is of type `geo_point`, this is equivalent to extracting the
    `latitude` value.

    :param point: Expression of type `geo_point` or `cartesian_point`. If
        `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"ST_Y({_render(point)})")


def st_ymax(point: ExpressionType) -> InstrumentedExpression:
    """Extracts the maximum value of the `y` coordinates from the supplied
    geometry. If the geometry is of type `geo_point` or `geo_shape` this is
    equivalent to extracting the maximum `latitude` value.

    :param point: Expression of type `geo_point`, `geo_shape`,
        `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"ST_YMAX({_render(point)})")


def st_ymin(point: ExpressionType) -> InstrumentedExpression:
    """Extracts the minimum value of the `y` coordinates from the supplied
    geometry. If the geometry is of type `geo_point` or `geo_shape` this is
    equivalent to extracting the minimum `latitude` value.

    :param point: Expression of type `geo_point`, `geo_shape`,
        `cartesian_point` or `cartesian_shape`. If `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"ST_YMIN({_render(point)})")


def substring(
    string: ExpressionType, start: ExpressionType, length: ExpressionType = None
) -> InstrumentedExpression:
    """Returns a substring of a string, specified by a start position and an
    optional length.

    :param string: String expression. If `null`, the function returns `null`.
    :param start: Start position.
    :param length: Length of the substring from the start position. Optional;
        if omitted, all positions after `start` are returned.
    """
    if length is not None:
        return InstrumentedExpression(
            f"SUBSTRING({_render(string)}, {_render(start)}, {_render(length)})"
        )
    else:
        return InstrumentedExpression(f"SUBSTRING({_render(string)}, {_render(start)})")


def sum(number: ExpressionType) -> InstrumentedExpression:
    """The sum of a numeric expression.

    :param number:
    """
    return InstrumentedExpression(f"SUM({_render(number)})")


def tan(angle: ExpressionType) -> InstrumentedExpression:
    """Returns the tangent of an angle.

    :param angle: An angle, in radians. If `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"TAN({_render(angle)})")


def tanh(number: ExpressionType) -> InstrumentedExpression:
    """Returns the hyperbolic tangent of a number.

    :param number: Numeric expression. If `null`, the function returns `null`.
    """
    return InstrumentedExpression(f"TANH({_render(number)})")


def tau() -> InstrumentedExpression:
    """Returns the ratio of a circle’s circumference to its radius."""
    return InstrumentedExpression("TAU()")


def term(field: ExpressionType, query: ExpressionType) -> InstrumentedExpression:
    """Performs a Term query on the specified field. Returns true if the
    provided term matches the row.

    :param field: Field that the query will target.
    :param query: Term you wish to find in the provided field.
    """
    return InstrumentedExpression(f"TERM({_render(field)}, {_render(query)})")


def top(
    field: ExpressionType, limit: ExpressionType, order: ExpressionType
) -> InstrumentedExpression:
    """Collects the top values for a field. Includes repeated values.

    :param field: The field to collect the top values for.
    :param limit: The maximum number of values to collect.
    :param order: The order to calculate the top values. Either `asc` or `desc`.
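
    Sketch (``col`` is a placeholder column; how the `"desc"` literal is
    quoted is left to ``_render``)::

        top(col, 3, "desc")  # builds TOP(<col>, 3, <order>)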
""" return InstrumentedExpression( f"TOP({_render(field)}, {_render(limit)}, {_render(order)})" ) def to_aggregate_metric_double(number: ExpressionType) -> InstrumentedExpression: """Encode a numeric to an aggregate_metric_double. :param number: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_AGGREGATE_METRIC_DOUBLE({_render(number)})") def to_base64(string: ExpressionType) -> InstrumentedExpression: """Encode a string to a base64 string. :param string: A string. """ return InstrumentedExpression(f"TO_BASE64({_render(string)})") def to_boolean(field: ExpressionType) -> InstrumentedExpression: """Converts an input value to a boolean value. A string value of `true` will be case-insensitive converted to the Boolean `true`. For anything else, including the empty string, the function will return `false`. The numerical value of `0` will be converted to `false`, anything else will be converted to `true`. :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_BOOLEAN({_render(field)})") def to_cartesianpoint(field: ExpressionType) -> InstrumentedExpression: """Converts an input value to a `cartesian_point` value. A string will only be successfully converted if it respects the WKT Point format. :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_CARTESIANPOINT({_render(field)})") def to_cartesianshape(field: ExpressionType) -> InstrumentedExpression: """Converts an input value to a `cartesian_shape` value. A string will only be successfully converted if it respects the WKT format. :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_CARTESIANSHAPE({_render(field)})") def to_dateperiod(field: ExpressionType) -> InstrumentedExpression: """Converts an input value into a `date_period` value. :param field: Input value. The input is a valid constant date period expression. """ return InstrumentedExpression(f"TO_DATEPERIOD({_render(field)})") def to_datetime(field: ExpressionType) -> InstrumentedExpression: """Converts an input value to a date value. A string will only be successfully converted if it’s respecting the format `yyyy-MM-dd'T'HH:mm:ss.SSS'Z'`. To convert dates in other formats, use `DATE_PARSE`. :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_DATETIME({_render(field)})") def to_date_nanos(field: ExpressionType) -> InstrumentedExpression: """Converts an input to a nanosecond-resolution date value (aka date_nanos). :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_DATE_NANOS({_render(field)})") def to_degrees(number: ExpressionType) -> InstrumentedExpression: """Converts a number in radians to degrees). :param number: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_DEGREES({_render(number)})") def to_double(field: ExpressionType) -> InstrumentedExpression: """Converts an input value to a double value. If the input parameter is of a date type, its value will be interpreted as milliseconds since the Unix epoch, converted to double. Boolean `true` will be converted to double `1.0`, `false` to `0.0`. :param field: Input value. 
    The input can be a single- or multi-valued column or an expression.
    """
    return InstrumentedExpression(f"TO_DOUBLE({_render(field)})")


def to_geopoint(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value to a `geo_point` value. A string will only be
    successfully converted if it respects the WKT Point format.

    :param field: Input value. The input can be a single- or multi-valued
        column or an expression.
    """
    return InstrumentedExpression(f"TO_GEOPOINT({_render(field)})")


def to_geoshape(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value to a `geo_shape` value. A string will only be
    successfully converted if it respects the WKT format.

    :param field: Input value. The input can be a single- or multi-valued
        column or an expression.
    """
    return InstrumentedExpression(f"TO_GEOSHAPE({_render(field)})")


def to_integer(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value to an integer value. If the input parameter is
    of a date type, its value will be interpreted as milliseconds since the
    Unix epoch, converted to integer. Boolean `true` will be converted to
    integer `1`, `false` to `0`.

    :param field: Input value. The input can be a single- or multi-valued
        column or an expression.
    """
    return InstrumentedExpression(f"TO_INTEGER({_render(field)})")


def to_ip(
    field: ExpressionType, options: ExpressionType = None
) -> InstrumentedExpression:
    """Converts an input string to an IP value.

    :param field: Input value. The input can be a single- or multi-valued
        column or an expression.
    :param options: (Optional) Additional options.
    """
    if options is not None:
        return InstrumentedExpression(f"TO_IP({_render(field)}, {_render(options)})")
    else:
        return InstrumentedExpression(f"TO_IP({_render(field)})")


def to_long(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value to a long value. If the input parameter is of a
    date type, its value will be interpreted as milliseconds since the Unix
    epoch, converted to long. Boolean `true` will be converted to long `1`,
    `false` to `0`.

    :param field: Input value. The input can be a single- or multi-valued
        column or an expression.
    """
    return InstrumentedExpression(f"TO_LONG({_render(field)})")


def to_lower(str: ExpressionType) -> InstrumentedExpression:
    """Returns a new string representing the input string converted to lower case.

    :param str: String expression. If `null`, the function returns `null`. The
        input can be a single-valued column or expression, or a multi-valued
        column or expression.
    """
    return InstrumentedExpression(f"TO_LOWER({_render(str)})")


def to_radians(number: ExpressionType) -> InstrumentedExpression:
    """Converts a number in degrees to radians.

    :param number: Input value. The input can be a single- or multi-valued
        column or an expression.
    """
    return InstrumentedExpression(f"TO_RADIANS({_render(number)})")


def to_string(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value into a string.

    :param field: Input value. The input can be a single- or multi-valued
        column or an expression.
    """
    return InstrumentedExpression(f"TO_STRING({_render(field)})")


def to_timeduration(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value into a `time_duration` value.

    :param field: Input value. The input is a valid constant time duration expression.
    """
    return InstrumentedExpression(f"TO_TIMEDURATION({_render(field)})")


def to_unsigned_long(field: ExpressionType) -> InstrumentedExpression:
    """Converts an input value to an unsigned long value.
If the input parameter is of a date type, its value will be interpreted as milliseconds since the Unix epoch, converted to unsigned long. Boolean `true` will be converted to unsigned long `1`, `false` to `0`. :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_UNSIGNED_LONG({_render(field)})") def to_upper(str: ExpressionType) -> InstrumentedExpression: """Returns a new string representing the input string converted to upper case. :param str: String expression. If `null`, the function returns `null`. The input can be a single-valued column or expression, or a multi-valued column or expression. """ return InstrumentedExpression(f"TO_UPPER({_render(str)})") def to_version(field: ExpressionType) -> InstrumentedExpression: """Converts an input string to a version value. :param field: Input value. The input can be a single- or multi-valued column or an expression. """ return InstrumentedExpression(f"TO_VERSION({_render(field)})") def trim(string: ExpressionType) -> InstrumentedExpression: """Removes leading and trailing whitespaces from a string. :param string: String expression. If `null`, the function returns `null`. """ return InstrumentedExpression(f"TRIM({_render(string)})") def values(field: ExpressionType) -> InstrumentedExpression: """Returns unique values as a multivalued field. The order of the returned values isn’t guaranteed. If you need the values returned in order use `MV_SORT`. :param field: """ return InstrumentedExpression(f"VALUES({_render(field)})") def weighted_avg( number: ExpressionType, weight: ExpressionType ) -> InstrumentedExpression: """The weighted average of a numeric expression. :param number: A numeric value. :param weight: A numeric weight. """ return InstrumentedExpression(f"WEIGHTED_AVG({_render(number)}, {_render(weight)})") python-elasticsearch-9.1.1/elasticsearch/exceptions.py000066400000000000000000000102571506101734100232230ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
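
# Usage sketch (illustrative only): the subclasses defined below map HTTP
# status codes to exceptions, so callers can catch them directly, e.g.
#
#     try:
#         client.get(index="my-index", id="missing")
#     except NotFoundError as e:  # raised for 404 responses
#         print(e.status_code)    # backwards-compatible alias of e.meta.status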
from typing import Any, Dict, Type from elastic_transport import ApiError as _ApiError from elastic_transport import ConnectionError as ConnectionError from elastic_transport import ConnectionTimeout as ConnectionTimeout from elastic_transport import SerializationError as SerializationError from elastic_transport import TlsError as SSLError from elastic_transport import TransportError as TransportError from elastic_transport import TransportWarning __all__ = [ "SerializationError", "TransportError", "ConnectionError", "SSLError", "ConnectionTimeout", "AuthorizationException", "AuthenticationException", "NotFoundError", "ConflictError", "BadRequestError", ] class ApiError(_ApiError): @property def status_code(self) -> int: """Backwards-compatible way to access ``self.meta.status``""" return self.meta.status @property def error(self) -> str: """Backwards-compatible way to access ``self.message``""" return self.message @property def info(self) -> Any: """Backwards-compatible way to access ``self.body``""" return self.body def __str__(self) -> str: cause = "" try: if self.body and isinstance(self.body, dict) and "error" in self.body: if isinstance(self.body["error"], dict): root_cause = self.body["error"]["root_cause"][0] caused_by = self.body["error"].get("caused_by", {}) cause = ", ".join( filter( None, [ repr(root_cause["reason"]), root_cause.get("resource.id"), root_cause.get("resource.type"), caused_by.get("reason"), ], ) ) else: cause = repr(self.body["error"]) except LookupError: pass msg = ", ".join(filter(None, [str(self.status_code), repr(self.error), cause])) return f"{self.__class__.__name__}({msg})" class UnsupportedProductError(ApiError): """Error which is raised when the client detects it's not connected to a supported product. """ def __str__(self) -> str: return self.message class NotFoundError(ApiError): """Exception representing a 404 status code.""" class ConflictError(ApiError): """Exception representing a 409 status code.""" class BadRequestError(ApiError): """Exception representing a 400 status code.""" class AuthenticationException(ApiError): """Exception representing a 401 status code.""" class AuthorizationException(ApiError): """Exception representing a 403 status code.""" class ElasticsearchWarning(TransportWarning): """Warning that is raised when a deprecated option or incorrect usage is flagged via the 'Warning' HTTP header. """ class GeneralAvailabilityWarning(TransportWarning): """Warning that is raised when a feature is not yet GA.""" # Aliases for backwards compatibility ElasticsearchDeprecationWarning = ElasticsearchWarning RequestError = BadRequestError HTTP_EXCEPTIONS: Dict[int, Type[ApiError]] = { 400: BadRequestError, 401: AuthenticationException, 403: AuthorizationException, 404: NotFoundError, 409: ConflictError, } python-elasticsearch-9.1.1/elasticsearch/helpers/000077500000000000000000000000001506101734100221255ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/helpers/__init__.py000066400000000000000000000026761506101734100242510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from .._async.helpers import async_bulk, async_reindex, async_scan, async_streaming_bulk from .._utils import fixup_module_metadata from .actions import _chunk_actions # noqa: F401 from .actions import _process_bulk_chunk # noqa: F401 from .actions import bulk, expand_action, parallel_bulk, reindex, scan, streaming_bulk from .errors import BulkIndexError, ScanError __all__ = [ "BulkIndexError", "ScanError", "expand_action", "streaming_bulk", "bulk", "parallel_bulk", "scan", "reindex", "async_scan", "async_bulk", "async_reindex", "async_streaming_bulk", ] fixup_module_metadata(__name__, globals()) del fixup_module_metadata python-elasticsearch-9.1.1/elasticsearch/helpers/actions.py000066400000000000000000000771721506101734100241550ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import time from operator import methodcaller from queue import Queue from typing import ( Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping, MutableMapping, Optional, Tuple, Union, ) from elastic_transport import OpenTelemetrySpan from .. import Elasticsearch from ..compat import to_bytes from ..exceptions import ApiError, NotFoundError, TransportError from ..serializer import Serializer from .errors import BulkIndexError, ScanError logger = logging.getLogger("elasticsearch.helpers") _TYPE_BULK_ACTION = Union[bytes, str, Dict[str, Any]] _TYPE_BULK_ACTION_HEADER = Dict[str, Any] _TYPE_BULK_ACTION_BODY = Union[None, bytes, Dict[str, Any]] _TYPE_BULK_ACTION_HEADER_AND_BODY = Tuple[ _TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY ] def expand_action(data: _TYPE_BULK_ACTION) -> _TYPE_BULK_ACTION_HEADER_AND_BODY: """ From one document or action definition passed in by the user extract the action/data lines needed for elasticsearch's :meth:`~elasticsearch.Elasticsearch.bulk` api. """ # when given a string, assume user wants to index raw json if isinstance(data, (bytes, str)): return {"index": {}}, to_bytes(data, "utf-8") # make sure we don't alter the action data = data.copy() op_type: str = data.pop("_op_type", "index") action: Dict[str, Any] = {op_type: {}} # If '_source' is a dict use it for source # otherwise if op_type == 'update' then # '_source' should be in the metadata. 
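    # Illustration (hypothetical action dict): in
    #   {"_op_type": "update", "_id": "1", "_source": {"doc": {"answer": 42}}}
    # the mapping is kept as the request body returned below, while a
    # non-mapping "_source" is lifted into the action metadata by this branch: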
if ( op_type == "update" and "_source" in data and not isinstance(data["_source"], Mapping) ): action[op_type]["_source"] = data.pop("_source") for key in ( "_id", "_index", "_if_seq_no", "_if_primary_term", "_parent", "_percolate", "_retry_on_conflict", "_routing", "_timestamp", "_type", "_version", "_version_type", "if_seq_no", "if_primary_term", "parent", "pipeline", "retry_on_conflict", "routing", "version", "version_type", ): if key in data: if key in { "_if_seq_no", "_if_primary_term", "_parent", "_retry_on_conflict", "_routing", "_version", "_version_type", }: action[op_type][key[1:]] = data.pop(key) else: action[op_type][key] = data.pop(key) # no data payload for delete if op_type == "delete": return action, None return action, data.get("_source", data) class _ActionChunker: def __init__( self, chunk_size: int, max_chunk_bytes: int, serializer: Serializer ) -> None: self.chunk_size = chunk_size self.max_chunk_bytes = max_chunk_bytes self.serializer = serializer self.size = 0 self.action_count = 0 self.bulk_actions: List[bytes] = [] self.bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ] = [] def feed( self, action: _TYPE_BULK_ACTION_HEADER, data: _TYPE_BULK_ACTION_BODY ) -> Optional[ Tuple[ List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], List[bytes], ] ]: ret = None raw_action = action raw_data = data action_bytes = to_bytes(self.serializer.dumps(action), "utf-8") # +1 to account for the trailing new line character cur_size = len(action_bytes) + 1 data_bytes: Optional[bytes] if data is not None: data_bytes = to_bytes(self.serializer.dumps(data), "utf-8") cur_size += len(data_bytes) + 1 else: data_bytes = None # full chunk, send it and start a new one if self.bulk_actions and ( self.size + cur_size > self.max_chunk_bytes or self.action_count == self.chunk_size ): ret = (self.bulk_data, self.bulk_actions) self.bulk_actions = [] self.bulk_data = [] self.size = 0 self.action_count = 0 self.bulk_actions.append(action_bytes) if data_bytes is not None: self.bulk_actions.append(data_bytes) self.bulk_data.append((raw_action, raw_data)) else: self.bulk_data.append((raw_action,)) self.size += cur_size self.action_count += 1 return ret def flush( self, ) -> Optional[ Tuple[ List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], List[bytes], ] ]: ret = None if self.bulk_actions: ret = (self.bulk_data, self.bulk_actions) self.bulk_actions = [] self.bulk_data = [] return ret def _chunk_actions( actions: Iterable[_TYPE_BULK_ACTION_HEADER_AND_BODY], chunk_size: int, max_chunk_bytes: int, serializer: Serializer, ) -> Iterable[ Tuple[ List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], List[bytes], ] ]: """ Split actions into chunks by number or size, serialize them into strings in the process. 
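
    For example (a sketch), 1,200 expanded actions with ``chunk_size=500``
    come out as chunks of 500, 500 and 200 actions, unless ``max_chunk_bytes``
    closes a chunk earlier.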
""" chunker = _ActionChunker( chunk_size=chunk_size, max_chunk_bytes=max_chunk_bytes, serializer=serializer ) for action, data in actions: ret = chunker.feed(action, data) if ret: yield ret ret = chunker.flush() if ret: yield ret def _process_bulk_chunk_success( resp: Dict[str, Any], bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], ignore_status: Collection[int], raise_on_error: bool = True, ) -> Iterator[Tuple[bool, Dict[str, Any]]]: # if raise on error is set, we need to collect errors per chunk before raising them errors = [] # go through request-response pairs and detect failures for data, (op_type, item) in zip( bulk_data, map(methodcaller("popitem"), resp["items"]) ): status_code = item.get("status", 500) ok = 200 <= status_code < 300 if not ok and raise_on_error and status_code not in ignore_status: # include original document source if len(data) > 1: item["data"] = data[1] errors.append({op_type: item}) if ok or not errors: # if we are not just recording all errors to be able to raise # them all at once, yield items individually yield ok, {op_type: item} if errors: raise BulkIndexError(f"{len(errors)} document(s) failed to index.", errors) def _process_bulk_chunk_error( error: ApiError, bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], ignore_status: Collection[int], raise_on_exception: bool = True, raise_on_error: bool = True, ) -> Iterable[Tuple[bool, Dict[str, Any]]]: # default behavior - just propagate exception if raise_on_exception and error.status_code not in ignore_status: raise error # if we are not propagating, mark all actions in current chunk as failed err_message = str(error) exc_errors = [] for data in bulk_data: # collect all the information about failed actions op_type, action = data[0].copy().popitem() info = {"error": err_message, "status": error.status_code, "exception": error} if op_type != "delete" and len(data) > 1: info["data"] = data[1] info.update(action) exc_errors.append({op_type: info}) # emulate standard behavior for failed actions if raise_on_error and error.status_code not in ignore_status: raise BulkIndexError( f"{len(exc_errors)} document(s) failed to index.", exc_errors ) else: for err in exc_errors: yield False, err def _process_bulk_chunk( client: Elasticsearch, bulk_actions: List[bytes], bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ], otel_span: OpenTelemetrySpan, raise_on_exception: bool = True, raise_on_error: bool = True, ignore_status: Union[int, Collection[int]] = (), *args: Any, **kwargs: Any, ) -> Iterable[Tuple[bool, Dict[str, Any]]]: """ Send a bulk request to elasticsearch and process the output. 
""" with client._otel.use_span(otel_span): if isinstance(ignore_status, int): ignore_status = (ignore_status,) try: # send the actual request resp = client.bulk(*args, operations=bulk_actions, **kwargs) # type: ignore[arg-type] except ApiError as e: gen = _process_bulk_chunk_error( error=e, bulk_data=bulk_data, ignore_status=ignore_status, raise_on_exception=raise_on_exception, raise_on_error=raise_on_error, ) else: gen = _process_bulk_chunk_success( resp=resp.body, bulk_data=bulk_data, ignore_status=ignore_status, raise_on_error=raise_on_error, ) yield from gen def streaming_bulk( client: Elasticsearch, actions: Iterable[_TYPE_BULK_ACTION], chunk_size: int = 500, max_chunk_bytes: int = 100 * 1024 * 1024, raise_on_error: bool = True, expand_action_callback: Callable[ [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY ] = expand_action, raise_on_exception: bool = True, max_retries: int = 0, initial_backoff: float = 2, max_backoff: float = 600, yield_ok: bool = True, ignore_status: Union[int, Collection[int]] = (), retry_on_status: Union[int, Collection[int]] = (429,), span_name: str = "helpers.streaming_bulk", *args: Any, **kwargs: Any, ) -> Iterable[Tuple[bool, Dict[str, Any]]]: """ Streaming bulk consumes actions from the iterable passed in and yields results per action. For non-streaming usecases use :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming bulk that returns summary information about the bulk operation once the entire input is consumed and sent. If you specify ``max_retries`` it will also retry any documents that were rejected with a ``429`` status code. Use ``retry_on_status`` to configure which status codes will be retried. To do this it will wait (**by calling time.sleep which will block**) for ``initial_backoff`` seconds and then, every subsequent rejection for the same chunk, for double the time every time up to ``max_backoff`` seconds. :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use :arg actions: iterable containing the actions to be executed :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted). :arg retry_on_status: HTTP status code that will trigger a retry. (if `None` is specified only status 429 will retry). :arg max_retries: maximum number of times a document will be retried when retry_on_status (defaulting to ``429``) is received, set to 0 (default) for no retries :arg initial_backoff: number of seconds we should wait before the first retry. 
Any subsequent retries will be powers of ``initial_backoff * 2**retry_number`` :arg max_backoff: maximum number of seconds a retry will wait :arg yield_ok: if set to False will skip successful documents in the output :arg ignore_status: list of HTTP status code that you want to ignore """ with client._otel.helpers_span(span_name) as otel_span: client = client.options() client._client_meta = (("h", "bp"),) if isinstance(retry_on_status, int): retry_on_status = (retry_on_status,) serializer = client.transport.serializers.get_serializer("application/json") bulk_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ] bulk_actions: List[bytes] for bulk_data, bulk_actions in _chunk_actions( map(expand_action_callback, actions), chunk_size, max_chunk_bytes, serializer, ): for attempt in range(max_retries + 1): to_retry: List[bytes] = [] to_retry_data: List[ Union[ Tuple[_TYPE_BULK_ACTION_HEADER], Tuple[_TYPE_BULK_ACTION_HEADER, _TYPE_BULK_ACTION_BODY], ] ] = [] if attempt: time.sleep(min(max_backoff, initial_backoff * 2 ** (attempt - 1))) try: for data, (ok, info) in zip( bulk_data, _process_bulk_chunk( client, bulk_actions, bulk_data, otel_span, raise_on_exception, raise_on_error, ignore_status, *args, **kwargs, ), ): if not ok: action, info = info.popitem() # retry if retries enabled, we are not in the last attempt, # and status in retry_on_status (defaulting to 429) if ( max_retries and info["status"] in retry_on_status and (attempt + 1) <= max_retries ): # _process_bulk_chunk expects bytes so we need to # re-serialize the data to_retry.extend(map(serializer.dumps, data)) to_retry_data.append(data) else: yield ok, {action: info} elif yield_ok: yield ok, info except ApiError as e: # suppress any status in retry_on_status (429 by default) # since we will retry them if attempt == max_retries or e.status_code not in retry_on_status: raise else: if not to_retry: break # retry only subset of documents that didn't succeed bulk_actions, bulk_data = to_retry, to_retry_data def bulk( client: Elasticsearch, actions: Iterable[_TYPE_BULK_ACTION], stats_only: bool = False, ignore_status: Union[int, Collection[int]] = (), *args: Any, **kwargs: Any, ) -> Tuple[int, Union[int, List[Dict[str, Any]]]]: """ Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides a more human friendly interface - it consumes an iterator of actions and sends them to elasticsearch in chunks. It returns a tuple with summary information - number of successfully executed actions and either list of errors or number of errors if ``stats_only`` is set to ``True``. Note that by default we raise a ``BulkIndexError`` when we encounter an error so options like ``stats_only`` only apply when ``raise_on_error`` is set to ``False``. When errors are being collected original document data is included in the error dictionary which can lead to an extra high memory usage. If you need to process a lot of data and want to ignore/collect errors please consider using the :func:`~elasticsearch.helpers.streaming_bulk` helper which will just return the errors and not store them in memory. 
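
    Example (a minimal sketch; the index name and documents are made up)::

        from elasticsearch import Elasticsearch, helpers

        client = Elasticsearch("http://localhost:9200")
        docs = ({"_index": "my-index", "_source": {"value": i}} for i in range(100))
        successes, errors = helpers.bulk(client, docs)
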
    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg stats_only: if `True` report only the number of successful/failed
        operations instead of the number of successes and a list of error responses
    :arg ignore_status: list of HTTP status code that you want to ignore

    Any additional keyword arguments will be passed to
    :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
    the operation, see :func:`~elasticsearch.helpers.streaming_bulk` for more
    accepted parameters.
    """
    success, failed = 0, 0

    # list of errors to be collected if not stats_only
    errors = []

    # make streaming_bulk yield successful results so we can count them
    kwargs["yield_ok"] = True
    for ok, item in streaming_bulk(
        client, actions, ignore_status=ignore_status, span_name="helpers.bulk", *args, **kwargs  # type: ignore[misc]
    ):
        # go through request-response pairs and detect failures
        if not ok:
            if not stats_only:
                errors.append(item)
            failed += 1
        else:
            success += 1

    return success, failed if stats_only else errors


def parallel_bulk(
    client: Elasticsearch,
    actions: Iterable[_TYPE_BULK_ACTION],
    thread_count: int = 4,
    chunk_size: int = 500,
    max_chunk_bytes: int = 100 * 1024 * 1024,
    queue_size: int = 4,
    expand_action_callback: Callable[
        [_TYPE_BULK_ACTION], _TYPE_BULK_ACTION_HEADER_AND_BODY
    ] = expand_action,
    ignore_status: Union[int, Collection[int]] = (),
    *args: Any,
    **kwargs: Any,
) -> Iterable[Tuple[bool, Any]]:
    """
    Parallel version of the bulk helper run in multiple threads at once.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg thread_count: size of the threadpool to use for the bulk requests
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
        from the execution of the last chunk when some occur. By default we raise.
    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
        call to ``bulk`` and just report the items that failed as failed.
    :arg expand_action_callback: callback executed on each action passed in,
        should return a tuple containing the action line and the data line
        (`None` if data line should be omitted).
    :arg queue_size: size of the task queue between the main thread (producing
        chunks to send) and the processing threads.
    :arg ignore_status: list of HTTP status code that you want to ignore
    """
    # Avoid importing multiprocessing unless parallel_bulk is used
    # to avoid exceptions on restricted environments like App Engine
    from multiprocessing.pool import ThreadPool

    expanded_actions = map(expand_action_callback, actions)
    serializer = client.transport.serializers.get_serializer("application/json")

    class BlockingPool(ThreadPool):
        def _setup_queues(self) -> None:
            super()._setup_queues()  # type: ignore[misc]
            # The queue must be at least the size of the number of threads to
            # prevent hanging when inserting sentinel values during teardown.
            self._inqueue: Queue[
                Tuple[
                    List[
                        Union[
                            Tuple[Dict[str, Any]], Tuple[Dict[str, Any], Dict[str, Any]]
                        ]
                    ],
                    List[bytes],
                ]
            ] = Queue(max(queue_size, thread_count))
            self._quick_put = self._inqueue.put

    with client._otel.helpers_span("helpers.parallel_bulk") as otel_span:
        pool = BlockingPool(thread_count)
        try:
            for result in pool.imap(
                lambda bulk_chunk: list(
                    _process_bulk_chunk(
                        client,
                        bulk_chunk[1],
                        bulk_chunk[0],
                        otel_span=otel_span,
                        ignore_status=ignore_status,  # type: ignore[misc]
                        *args,
                        **kwargs,
                    )
                ),
                _chunk_actions(
                    expanded_actions, chunk_size, max_chunk_bytes, serializer
                ),
            ):
                yield from result
        finally:
            pool.close()
            pool.join()


def scan(
    client: Elasticsearch,
    query: Optional[Any] = None,
    scroll: str = "5m",
    raise_on_error: bool = True,
    preserve_order: bool = False,
    size: int = 1000,
    request_timeout: Optional[float] = None,
    clear_scroll: bool = True,
    scroll_kwargs: Optional[MutableMapping[str, Any]] = None,
    **kwargs: Any,
) -> Iterable[Dict[str, Any]]:
    """
    Simple abstraction on top of the
    :meth:`~elasticsearch.Elasticsearch.scroll` api - a simple iterator that
    yields all hits as returned by underlying scroll requests.

    By default scan does not return results in any pre-determined order. To
    have a standard order in the returned documents (either by score or
    explicit sort definition) when scrolling, use ``preserve_order=True``. This
    may be an expensive operation and will negate the performance benefits of
    using ``scan``.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    :arg raise_on_error: raises an exception (``ScanError``) if an error is
        encountered (some shards fail to execute). By default we raise.
    :arg preserve_order: don't set the ``search_type`` to ``scan`` - this will
        cause the scroll to paginate with preserving the order. Note that this
        can be an extremely expensive operation and can easily lead to
        unpredictable results, use with caution.
    :arg size: size (per shard) of the batch sent at each iteration.
    :arg request_timeout: explicit timeout for each call to ``scan``
    :arg clear_scroll: explicitly calls delete on the scroll id via the clear
        scroll API at the end of the method on completion or error, defaults
        to true.
    :arg scroll_kwargs: additional kwargs to be passed to
        :meth:`~elasticsearch.Elasticsearch.scroll`

    Any additional keyword arguments will be passed to the initial
    :meth:`~elasticsearch.Elasticsearch.search` call::

        scan(client,
            query={"query": {"match": {"title": "python"}}},
            index="orders-*",
            doc_type="books"
        )

    """
    scroll_kwargs = scroll_kwargs or {}

    if not preserve_order:
        query = query.copy() if query else {}
        query["sort"] = "_doc"

    def pop_transport_kwargs(kw: MutableMapping[str, Any]) -> Dict[str, Any]:
        # Grab options that should be propagated to every
        # API call within this helper instead of just 'search()'
        transport_kwargs = {}
        for key in (
            "headers",
            "api_key",
            "http_auth",
            "basic_auth",
            "bearer_auth",
            "opaque_id",
        ):
            try:
                value = kw.pop(key)
                if key == "http_auth":
                    key = "basic_auth"
                transport_kwargs[key] = value
            except KeyError:
                pass
        return transport_kwargs

    client = client.options(
        request_timeout=request_timeout, **pop_transport_kwargs(kwargs)
    )
    client._client_meta = (("h", "s"),)

    # Setting query={"from": ...} would make 'from' be used
    # as a keyword argument instead of 'from_'. We handle that here.
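    # e.g. a caller passing query={"from": 10} ends up invoking
    # client.search(from_=10, ...) rather than using the reserved word 'from'.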
    def normalize_from_keyword(kw: MutableMapping[str, Any]) -> None:
        if "from" in kw:
            kw["from_"] = kw.pop("from")

    normalize_from_keyword(kwargs)
    try:
        search_kwargs = query.copy() if query else {}
        normalize_from_keyword(search_kwargs)
        search_kwargs.update(kwargs)
        search_kwargs["scroll"] = scroll
        search_kwargs["size"] = size
        resp = client.search(**search_kwargs)

    # Try the old deprecated way if we fail immediately on parameters.
    except TypeError:
        search_kwargs = kwargs.copy()
        search_kwargs["scroll"] = scroll
        search_kwargs["size"] = size
        resp = client.search(body=query, **search_kwargs)

    scroll_id = resp.get("_scroll_id")

    scroll_transport_kwargs = pop_transport_kwargs(scroll_kwargs)
    if scroll_transport_kwargs:
        scroll_client = client.options(**scroll_transport_kwargs)
    else:
        scroll_client = client

    try:
        while scroll_id and resp["hits"]["hits"]:
            yield from resp["hits"]["hits"]

            # Default to 0 if the value isn't included in the response
            shards_info: Dict[str, int] = resp["_shards"]
            shards_successful = shards_info.get("successful", 0)
            shards_skipped = shards_info.get("skipped", 0)
            shards_total = shards_info.get("total", 0)

            # check if we have any errors
            if (shards_successful + shards_skipped) < shards_total:
                shards_message = "Scroll request has only succeeded on %d (+%d skipped) shards out of %d."
                logger.warning(
                    shards_message,
                    shards_successful,
                    shards_skipped,
                    shards_total,
                )
                if raise_on_error:
                    raise ScanError(
                        scroll_id,
                        shards_message
                        % (
                            shards_successful,
                            shards_skipped,
                            shards_total,
                        ),
                    )
            resp = scroll_client.scroll(
                scroll_id=scroll_id, scroll=scroll, **scroll_kwargs
            )
            scroll_id = resp.get("_scroll_id")

    finally:
        if scroll_id and clear_scroll:
            client.options(ignore_status=404).clear_scroll(scroll_id=scroll_id)


def reindex(
    client: Elasticsearch,
    source_index: Union[str, Collection[str]],
    target_index: str,
    query: Optional[Any] = None,
    target_client: Optional[Elasticsearch] = None,
    chunk_size: int = 500,
    scroll: str = "5m",
    op_type: Optional[str] = None,
    scan_kwargs: MutableMapping[str, Any] = {},
    bulk_kwargs: MutableMapping[str, Any] = {},
) -> Tuple[int, Union[int, List[Dict[str, Any]]]]:
    """
    Reindex all documents from one index that satisfy a given query to
    another, potentially (if `target_client` is specified) on a different cluster.
    If you don't specify the query you will reindex all the documents.

    Since ``2.3`` a :meth:`~elasticsearch.Elasticsearch.reindex` api is
    available as part of elasticsearch itself. It is recommended to use the
    api instead of this helper wherever possible. The helper is here mostly
    for backwards compatibility and for situations where more flexibility is needed.

    .. note::

        This helper doesn't transfer mappings, just the data.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for
        read if `target_client` is specified as well)
    :arg source_index: index (or list of indices) to read documents from
    :arg target_index: name of the index in the target cluster to populate
    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
    :arg target_client: optional, if specified will be used for writing (thus
        enabling reindex between clusters)
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    :arg op_type: Explicit operation type. Defaults to '_index'. Data streams must
        be set to 'create'. If not specified, will auto-detect if target_index is a
        data stream.
    :arg scan_kwargs: additional kwargs to be passed to
        :func:`~elasticsearch.helpers.scan`
    :arg bulk_kwargs: additional kwargs to be passed to
        :func:`~elasticsearch.helpers.bulk`
    """
    target_client = client if target_client is None else target_client
    docs = scan(client, query=query, index=source_index, scroll=scroll, **scan_kwargs)

    def _change_doc_index(
        hits: Iterable[Dict[str, Any]], index: str, op_type: Optional[str]
    ) -> Iterable[Dict[str, Any]]:
        for h in hits:
            h["_index"] = index
            if op_type is not None:
                h["_op_type"] = op_type
            if "fields" in h:
                h.update(h.pop("fields"))
            yield h

    kwargs = {"stats_only": True}
    kwargs.update(bulk_kwargs)

    is_data_stream = False
    try:
        # Verify if the target_index is data stream or index
        data_streams = target_client.indices.get_data_stream(
            name=target_index, expand_wildcards="all"
        )
        is_data_stream = any(
            data_stream["name"] == target_index
            for data_stream in data_streams["data_streams"]
        )
    except (TransportError, KeyError, NotFoundError):
        # If it's not a data stream, it might be an index
        pass

    if is_data_stream:
        if op_type not in (None, "create"):
            raise ValueError("Data streams must have 'op_type' set to 'create'")
        else:
            op_type = "create"

    return bulk(
        target_client,
        _change_doc_index(docs, target_index, op_type),
        chunk_size=chunk_size,
        **kwargs,
    )
python-elasticsearch-9.1.1/elasticsearch/helpers/errors.py000066400000000000000000000027421506101734100240200ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from typing import Any, Dict, List, Tuple, Type


class BulkIndexError(Exception):
    def __init__(self, message: str, errors: List[Dict[str, Any]]):
        super().__init__(message)
        self.errors: List[Dict[str, Any]] = errors

    def __reduce__(
        self,
    ) -> Tuple[Type["BulkIndexError"], Tuple[str, List[Dict[str, Any]]]]:
        return (self.__class__, (self.args[0], self.errors))


class ScanError(Exception):
    scroll_id: str

    def __init__(self, scroll_id: str, *args: Any) -> None:
        super().__init__(*args)
        self.scroll_id = scroll_id

    def __reduce__(self) -> Tuple[Type["ScanError"], Tuple[str, str]]:
        return (self.__class__, (self.scroll_id,) + self.args)
python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/000077500000000000000000000000001506101734100245045ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/__init__.py000066400000000000000000000042201506101734100266130ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from elasticsearch.helpers.vectorstore._async.embedding_service import ( AsyncElasticsearchEmbeddings, AsyncEmbeddingService, ) from elasticsearch.helpers.vectorstore._async.strategies import ( AsyncBM25Strategy, AsyncDenseVectorScriptScoreStrategy, AsyncDenseVectorStrategy, AsyncRetrievalStrategy, AsyncSparseVectorStrategy, ) from elasticsearch.helpers.vectorstore._async.vectorstore import AsyncVectorStore from elasticsearch.helpers.vectorstore._sync.embedding_service import ( ElasticsearchEmbeddings, EmbeddingService, ) from elasticsearch.helpers.vectorstore._sync.strategies import ( BM25Strategy, DenseVectorScriptScoreStrategy, DenseVectorStrategy, RetrievalStrategy, SparseVectorStrategy, ) from elasticsearch.helpers.vectorstore._sync.vectorstore import VectorStore from elasticsearch.helpers.vectorstore._utils import DistanceMetric __all__ = [ "AsyncBM25Strategy", "AsyncDenseVectorScriptScoreStrategy", "AsyncDenseVectorStrategy", "AsyncElasticsearchEmbeddings", "AsyncEmbeddingService", "AsyncRetrievalStrategy", "AsyncSparseVectorStrategy", "AsyncVectorStore", "BM25Strategy", "DenseVectorScriptScoreStrategy", "DenseVectorStrategy", "DistanceMetric", "ElasticsearchEmbeddings", "EmbeddingService", "RetrievalStrategy", "SparseVectorStrategy", "VectorStore", ] python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_async/000077500000000000000000000000001506101734100257605ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_async/__init__.py000066400000000000000000000014231506101734100300710ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_async/_utils.py000066400000000000000000000031101506101734100276240ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from elasticsearch import AsyncElasticsearch, BadRequestError, NotFoundError async def model_must_be_deployed(client: AsyncElasticsearch, model_id: str) -> None: """ :raises [NotFoundError]: if the model is neither downloaded nor deployed. :raises [ConflictError]: if the model is downloaded but not yet deployed. """ doc = {"text_field": f"test if the model '{model_id}' is deployed"} try: await client.ml.infer_trained_model(model_id=model_id, docs=[doc]) except BadRequestError: # The model is deployed but expects a different input field name. pass async def model_is_deployed(client: AsyncElasticsearch, model_id: str) -> bool: try: await model_must_be_deployed(client, model_id) return True except NotFoundError: return False python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_async/embedding_service.py000066400000000000000000000071071506101734100317750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from abc import ABC, abstractmethod from typing import List from elasticsearch import AsyncElasticsearch from elasticsearch._version import __versionstr__ as lib_version class AsyncEmbeddingService(ABC): @abstractmethod async def embed_documents(self, texts: List[str]) -> List[List[float]]: """Generate embeddings for a list of documents. :param texts: A list of document strings to generate embeddings for. :return: A list of embeddings, one for each document in the input. """ @abstractmethod async def embed_query(self, query: str) -> List[float]: """Generate an embedding for a single query text. :param text: The query text to generate an embedding for. :return: The embedding for the input query text. """ class AsyncElasticsearchEmbeddings(AsyncEmbeddingService): """Elasticsearch as a service for embedding model inference. You need to have an embedding model downloaded and deployed in Elasticsearch: - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, *, client: AsyncElasticsearch, model_id: str, input_field: str = "text_field", user_agent: str = f"elasticsearch-py-es/{lib_version}", ): """ :param agent_header: user agent header specific to the 3rd party integration. Used for usage tracking in Elastic Cloud. :param model_id: The model_id of the model deployed in the Elasticsearch cluster. 
:param input_field: The name of the key for the input text field in the document. Defaults to 'text_field'. :param client: Elasticsearch client connection. Alternatively specify the Elasticsearch connection with the other es_* parameters. """ # Add integration-specific usage header for tracking usage in Elastic Cloud. # client.options preserves existing (non-user-agent) headers. client = client.options(headers={"User-Agent": user_agent}) self.client = client self.model_id = model_id self.input_field = input_field async def embed_documents(self, texts: List[str]) -> List[List[float]]: return await self._embedding_func(texts) async def embed_query(self, text: str) -> List[float]: result = await self._embedding_func([text]) return result[0] async def _embedding_func(self, texts: List[str]) -> List[List[float]]: response = await self.client.ml.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) return [doc["predicted_value"] for doc in response["inference_results"]] python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_async/strategies.py000066400000000000000000000374611506101734100305170ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional, Tuple, Union, cast from elasticsearch import AsyncElasticsearch from elasticsearch.helpers.vectorstore._async._utils import model_must_be_deployed from elasticsearch.helpers.vectorstore._utils import DistanceMetric class AsyncRetrievalStrategy(ABC): @abstractmethod def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: """ Returns the Elasticsearch query body for the given parameters. The store will execute the query. :param query: The text query. Can be None if query_vector is given. :param k: The total number of results to retrieve. :param num_candidates: The number of results to fetch initially in knn search. :param filter: List of filter clauses to apply to the query. :param query_vector: The query vector. Can be None if a query string is given. :return: The Elasticsearch query body. """ @abstractmethod def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ Create the required index and do necessary preliminary work, like creating inference pipelines or checking if a required model was deployed. :param client: Elasticsearch client connection. :param text_field: The field containing the text data in the index. :param vector_field: The field containing the vector representations in the index. 
:param num_dimensions: If vectors are indexed, how many dimensions do they have. :return: Dictionary with field and field type pairs that describe the schema. """ async def before_index_creation( self, *, client: AsyncElasticsearch, text_field: str, vector_field: str ) -> None: """ Executes before the index is created. Used for setting up any required Elasticsearch resources like a pipeline. Defaults to a no-op. :param client: The Elasticsearch client. :param text_field: The field containing the text data in the index. :param vector_field: The field containing the vector representations in the index. """ pass def needs_inference(self) -> bool: """ Some retrieval strategies index embedding vectors and allow search by embedding vector, for example the `DenseVectorStrategy` strategy. Mapping a user input query string to an embedding vector is called inference. Inference can be applied in Elasticsearch (using a `model_id`) or outside of Elasticsearch (using an `EmbeddingService` defined on the `VectorStore`). In the latter case, this method has to return True. """ return False class AsyncSparseVectorStrategy(AsyncRetrievalStrategy): """Sparse retrieval strategy using the `sparse_vector` processor.""" def __init__(self, model_id: str = ".elser_model_2"): self.model_id = model_id self._tokens_field = "tokens" self._pipeline_name = f"{self.model_id}_sparse_embedding" def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: if query_vector: raise ValueError( "Cannot do sparse retrieval with a query_vector. " "Inference is currently always applied in Elasticsearch." ) if query is None: raise ValueError("please specify a query string") return { "query": { "bool": { "must": [ { "sparse_vector": { "field": f"{vector_field}.{self._tokens_field}", "inference_id": self.model_id, "query": query, } } ], "filter": filter, } } } def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: mappings: Dict[str, Any] = { "properties": { vector_field: { "properties": {self._tokens_field: {"type": "sparse_vector"}} } } } settings = {"default_pipeline": self._pipeline_name} return mappings, settings async def before_index_creation( self, *, client: AsyncElasticsearch, text_field: str, vector_field: str ) -> None: if self.model_id: await model_must_be_deployed(client, self.model_id) # Create a pipeline for the model await client.ingest.put_pipeline( id=self._pipeline_name, description="Embedding pipeline for Python VectorStore", processors=[ { "inference": { "model_id": self.model_id, "input_output": [ { "input_field": text_field, "output_field": f"{vector_field}.{self._tokens_field}", }, ], } } ], ) class AsyncDenseVectorStrategy(AsyncRetrievalStrategy): """K-nearest-neighbors retrieval.""" def __init__( self, *, distance: DistanceMetric = DistanceMetric.COSINE, model_id: Optional[str] = None, hybrid: bool = False, rrf: Union[bool, Dict[str, Any]] = True, text_field: Optional[str] = "text_field", ): if hybrid and not text_field: raise ValueError( "to enable hybrid you have to specify a text_field (for BM25Strategy matching)" ) self.distance = distance self.model_id = model_id self.hybrid = hybrid self.rrf = rrf self.text_field = text_field def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, 
filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: knn = { "filter": filter, "field": vector_field, "k": k, "num_candidates": num_candidates, } if query_vector is not None: knn["query_vector"] = query_vector else: # Inference in Elasticsearch. When initializing we make sure to always have # a model_id if don't have an embedding_service. knn["query_vector_builder"] = { "text_embedding": { "model_id": self.model_id, "model_text": query, } } if self.hybrid: return self._hybrid(query=cast(str, query), knn=knn, filter=filter) return {"knn": knn} def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: if self.distance is DistanceMetric.COSINE: similarity = "cosine" elif self.distance is DistanceMetric.EUCLIDEAN_DISTANCE: similarity = "l2_norm" elif self.distance is DistanceMetric.DOT_PRODUCT: similarity = "dot_product" elif self.distance is DistanceMetric.MAX_INNER_PRODUCT: similarity = "max_inner_product" else: raise ValueError(f"Similarity {self.distance} not supported.") mappings: Dict[str, Any] = { "properties": { vector_field: { "type": "dense_vector", "dims": num_dimensions, "index": True, "similarity": similarity, }, } } return mappings, {} async def before_index_creation( self, *, client: AsyncElasticsearch, text_field: str, vector_field: str ) -> None: if self.model_id: await model_must_be_deployed(client, self.model_id) def _hybrid( self, query: str, knn: Dict[str, Any], filter: List[Dict[str, Any]] ) -> Dict[str, Any]: # Add a query to the knn query. # RRF is used to even the score from the knn query and text query # RRF has two optional parameters: {'rank_constant':int, 'rank_window_size':int} # https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html standard_query = { "query": { "bool": { "must": [ { "match": { self.text_field: { "query": query, } } } ], "filter": filter, } } } if self.rrf is False: query_body = { "knn": knn, **standard_query, } else: rrf_options = {} if isinstance(self.rrf, Dict): if "rank_constant" in self.rrf: rrf_options["rank_constant"] = self.rrf["rank_constant"] if "window_size" in self.rrf: # 'window_size' was renamed to 'rank_window_size', but we support # the older name for backwards compatibility rrf_options["rank_window_size"] = self.rrf["window_size"] if "rank_window_size" in self.rrf: rrf_options["rank_window_size"] = self.rrf["rank_window_size"] query_body = { "retriever": { "rrf": { "retrievers": [ {"standard": standard_query}, {"knn": knn}, ], **rrf_options, }, }, } return query_body def needs_inference(self) -> bool: return not self.model_id class AsyncDenseVectorScriptScoreStrategy(AsyncRetrievalStrategy): """Exact nearest neighbors retrieval using the `script_score` query.""" def __init__(self, distance: DistanceMetric = DistanceMetric.COSINE) -> None: self.distance = distance def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: if not query_vector: raise ValueError("specify a query_vector") if self.distance is DistanceMetric.COSINE: similarity_algo = ( f"cosineSimilarity(params.query_vector, '{vector_field}') + 1.0" ) elif self.distance is DistanceMetric.EUCLIDEAN_DISTANCE: similarity_algo = f"1 / (1 + l2norm(params.query_vector, '{vector_field}'))" elif self.distance is DistanceMetric.DOT_PRODUCT: similarity_algo = f""" double value = dotProduct(params.query_vector, '{vector_field}'); 
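                // Note (descriptive comment): the sigmoid below maps the unbounded
                // dot product onto a positive, bounded range, since script_score
                // must not return negative scores.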
                return sigmoid(1, Math.E, -value);
            """
        elif self.distance is DistanceMetric.MAX_INNER_PRODUCT:
            similarity_algo = f"""
                double value = dotProduct(params.query_vector, '{vector_field}');
                if (value < 0) {{
                    return 1 / (1 + -1 * value);
                }}
                return value + 1;
            """
        else:
            raise ValueError(f"Similarity {self.distance} not supported.")

        query_bool: Dict[str, Any] = {"match_all": {}}
        if filter:
            query_bool = {"bool": {"filter": filter}}

        return {
            "query": {
                "script_score": {
                    "query": query_bool,
                    "script": {
                        "source": similarity_algo,
                        "params": {"query_vector": query_vector},
                    },
                },
            }
        }

    def es_mappings_settings(
        self,
        *,
        text_field: str,
        vector_field: str,
        num_dimensions: Optional[int],
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        mappings = {
            "properties": {
                vector_field: {
                    "type": "dense_vector",
                    "dims": num_dimensions,
                    "index": False,
                }
            }
        }

        return mappings, {}

    def needs_inference(self) -> bool:
        return True


class AsyncBM25Strategy(AsyncRetrievalStrategy):
    def __init__(
        self,
        k1: Optional[float] = None,
        b: Optional[float] = None,
    ):
        self.k1 = k1
        self.b = b

    def es_query(
        self,
        *,
        query: Optional[str],
        query_vector: Optional[List[float]],
        text_field: str,
        vector_field: str,
        k: int,
        num_candidates: int,
        filter: List[Dict[str, Any]] = [],
    ) -> Dict[str, Any]:
        return {
            "query": {
                "bool": {
                    "must": [
                        {
                            "match": {
                                text_field: {
                                    "query": query,
                                }
                            },
                        },
                    ],
                    "filter": filter,
                },
            },
        }

    def es_mappings_settings(
        self,
        *,
        text_field: str,
        vector_field: str,
        num_dimensions: Optional[int],
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        similarity_name = "custom_bm25"
        mappings: Dict[str, Any] = {
            "properties": {
                text_field: {
                    "type": "text",
                    "similarity": similarity_name,
                },
            },
        }

        bm25: Dict[str, Any] = {
            "type": "BM25",
        }
        if self.k1 is not None:
            bm25["k1"] = self.k1
        if self.b is not None:
            bm25["b"] = self.b
        settings = {
            "similarity": {
                similarity_name: bm25,
            }
        }

        return mappings, settings
python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_async/vectorstore.py000066400000000000000000000405031506101734100307130ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import logging
import uuid
from typing import Any, Callable, Dict, List, Optional

from elasticsearch import AsyncElasticsearch
from elasticsearch._version import __versionstr__ as lib_version
from elasticsearch.helpers import BulkIndexError, async_bulk
from elasticsearch.helpers.vectorstore import (
    AsyncEmbeddingService,
    AsyncRetrievalStrategy,
)
from elasticsearch.helpers.vectorstore._utils import maximal_marginal_relevance

logger = logging.getLogger(__name__)


class AsyncVectorStore:
    """
    VectorStore is a higher-level abstraction of indexing and search.
    Users can pick from available retrieval strategies.

    Documents have up to 3 fields:
    - text_field: the text to be indexed and searched.
- metadata: additional information about the document, either schema-free or defined by the supplied metadata_mappings. - vector_field (usually not filled by the user): the embedding vector of the text. Depending on the strategy, vector embeddings are - created by the user beforehand - created by this AsyncVectorStore class in Python - created in-stack by inference pipelines. """ def __init__( self, client: AsyncElasticsearch, *, index: str, retrieval_strategy: AsyncRetrievalStrategy, embedding_service: Optional[AsyncEmbeddingService] = None, num_dimensions: Optional[int] = None, text_field: str = "text_field", vector_field: str = "vector_field", metadata_mappings: Optional[Dict[str, Any]] = None, user_agent: str = f"elasticsearch-py-vs/{lib_version}", custom_index_settings: Optional[Dict[str, Any]] = None, ) -> None: """ :param user_header: user agent header specific to the 3rd party integration. Used for usage tracking in Elastic Cloud. :param index: The name of the index to query. :param retrieval_strategy: how to index and search the data. See the strategies module for availble strategies. :param text_field: Name of the field with the textual data. :param vector_field: For strategies that perform embedding inference in Python, the embedding vector goes in this field. :param client: Elasticsearch client connection. Alternatively specify the Elasticsearch connection with the other es_* parameters. :param custom_index_settings: A dictionary of custom settings for the index. This can include configurations like the number of shards, number of replicas, analysis settings, and other index-specific settings. If not provided, default settings will be used. Note that if the same setting is provided by both the user and the strategy, will raise an error. """ # Add integration-specific usage header for tracking usage in Elastic Cloud. # client.options preserves existing (non-user-agent) headers. client = client.options(headers={"User-Agent": user_agent}) if hasattr(retrieval_strategy, "text_field"): retrieval_strategy.text_field = text_field if hasattr(retrieval_strategy, "vector_field"): retrieval_strategy.vector_field = vector_field self.client = client self.index = index self.retrieval_strategy = retrieval_strategy self.embedding_service = embedding_service self.num_dimensions = num_dimensions self.text_field = text_field self.vector_field = vector_field self.metadata_mappings = metadata_mappings self.custom_index_settings = custom_index_settings async def close(self) -> None: return await self.client.close() async def add_texts( self, texts: List[str], *, metadatas: Optional[List[Dict[str, Any]]] = None, vectors: Optional[List[List[float]]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, create_index_if_not_exists: bool = True, bulk_kwargs: Optional[Dict[str, Any]] = None, ) -> List[str]: """Add documents to the Elasticsearch index. :param texts: List of text documents. :param metadata: Optional list of document metadata. Must be of same length as texts. :param vectors: Optional list of embedding vectors. Must be of same length as texts. :param ids: Optional list of ID strings. Must be of same length as texts. :param refresh_indices: Whether to refresh the index after deleting documents. Defaults to True. :param create_index_if_not_exists: Whether to create the index if it does not exist. Defaults to True. :param bulk_kwargs: Arguments to pass to the bulk function when indexing (for example chunk_size). 
        :return: List of IDs of the created documents, either echoing the provided
            ones or returning newly created ones.
        """
        bulk_kwargs = bulk_kwargs or {}
        ids = ids or [str(uuid.uuid4()) for _ in texts]
        requests = []

        if create_index_if_not_exists:
            await self._create_index_if_not_exists()

        if self.embedding_service and not vectors:
            vectors = await self.embedding_service.embed_documents(texts)

        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}

            request: Dict[str, Any] = {
                "_op_type": "index",
                "_index": self.index,
                self.text_field: text,
                "metadata": metadata,
                "_id": ids[i],
            }

            if vectors:
                request[self.vector_field] = vectors[i]

            requests.append(request)

        if len(requests) > 0:
            try:
                success, failed = await async_bulk(
                    self.client,
                    requests,
                    stats_only=True,
                    refresh=refresh_indices,
                    **bulk_kwargs,
                )
                logger.debug(f"added texts {ids} to index")
                return ids
            except BulkIndexError as e:
                logger.error(f"Error adding texts: {e}")
                firstError = e.errors[0].get("index", {}).get("error", {})
                logger.error(f"First error reason: {firstError.get('reason')}")
                raise e
        else:
            logger.debug("No texts to add to index")
            return []

    async def delete(  # type: ignore[no-untyped-def]
        self,
        *,
        ids: Optional[List[str]] = None,
        query: Optional[Dict[str, Any]] = None,
        refresh_indices: bool = True,
        **delete_kwargs,
    ) -> bool:
        """Delete documents from the Elasticsearch index.

        :param ids: List of IDs of documents to delete.
        :param query: Elasticsearch query selecting the documents to delete.
            Mutually exclusive with ids.
        :param refresh_indices: Whether to refresh the index after deleting documents.
            Defaults to True.

        :return: True if deletion was successful.
        """
        if ids is not None and query is not None:
            raise ValueError("only one of ids or query may be specified")
        elif ids is None and query is None:
            raise ValueError("either ids or query must be specified")

        try:
            if ids:
                body = [
                    {"_op_type": "delete", "_index": self.index, "_id": _id}
                    for _id in ids
                ]
                await async_bulk(
                    self.client,
                    body,
                    refresh=refresh_indices,
                    ignore_status=404,
                    **delete_kwargs,
                )
                logger.debug(f"Deleted {len(body)} texts from index")
            else:
                await self.client.delete_by_query(
                    index=self.index,
                    query=query,
                    refresh=refresh_indices,
                    **delete_kwargs,
                )
        except BulkIndexError as e:
            logger.error(f"Error deleting texts: {e}")
            firstError = e.errors[0].get("index", {}).get("error", {})
            logger.error(f"First error reason: {firstError.get('reason')}")
            raise e

        return True

    async def search(
        self,
        *,
        query: Optional[str] = None,
        query_vector: Optional[List[float]] = None,
        k: int = 4,
        num_candidates: int = 50,
        fields: Optional[List[str]] = None,
        filter: Optional[List[Dict[str, Any]]] = None,
        custom_query: Optional[
            Callable[[Dict[str, Any], Optional[str]], Dict[str, Any]]
        ] = None,
    ) -> List[Dict[str, Any]]:
        """
        :param query: Input query string.
        :param query_vector: Input embedding vector. If given, input query string is
            ignored.
        :param k: Number of returned results.
        :param num_candidates: Number of candidates to fetch from data nodes in knn.
        :param fields: List of field names to return.
        :param filter: Elasticsearch filters to apply.
        :param custom_query: Function to modify the Elasticsearch query body before it is
            sent to Elasticsearch.

        :return: List of document hits. Includes _index, _id, _score and _source.
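
        Example (an illustrative sketch, assuming the store was configured with an
        embedding service or an in-stack inference model; the query text, filter
        and field values are made up)::

            hits = await store.search(
                query="what is a vector store?",
                k=3,
                filter=[{"term": {"metadata.category": "docs"}}],
            )
            for hit in hits:
                print(hit["_score"], hit["_source"]["text_field"])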
""" if fields is None: fields = [] if "metadata" not in fields: fields.append("metadata") if self.text_field not in fields: fields.append(self.text_field) if self.embedding_service and not query_vector: if not query: raise ValueError("specify a query or a query_vector to search") query_vector = await self.embedding_service.embed_query(query) query_body = self.retrieval_strategy.es_query( query=query, query_vector=query_vector, text_field=self.text_field, vector_field=self.vector_field, k=k, num_candidates=num_candidates, filter=filter or [], ) if custom_query is not None: query_body = custom_query(query_body, query) logger.debug(f"Calling custom_query, Query body now: {query_body}") response = await self.client.search( index=self.index, **query_body, size=k, source=True, source_includes=fields, ) hits: List[Dict[str, Any]] = response["hits"]["hits"] return hits async def _create_index_if_not_exists(self) -> None: exists = await self.client.indices.exists(index=self.index) if exists.meta.status == 200: logger.debug(f"Index {self.index} already exists. Skipping creation.") return if self.retrieval_strategy.needs_inference(): if not self.num_dimensions and not self.embedding_service: raise ValueError( "retrieval strategy requires embeddings; either embedding_service " "or num_dimensions need to be specified" ) if not self.num_dimensions and self.embedding_service: vector = await self.embedding_service.embed_query("get num dimensions") self.num_dimensions = len(vector) mappings, settings = self.retrieval_strategy.es_mappings_settings( text_field=self.text_field, vector_field=self.vector_field, num_dimensions=self.num_dimensions, ) if self.custom_index_settings: conflicting_keys = set(self.custom_index_settings.keys()) & set( settings.keys() ) if conflicting_keys: raise ValueError(f"Conflicting settings: {conflicting_keys}") else: settings.update(self.custom_index_settings) if self.metadata_mappings: metadata = mappings["properties"].get("metadata", {"properties": {}}) for key in self.metadata_mappings.keys(): if key in metadata: raise ValueError(f"metadata key {key} already exists in mappings") metadata = dict(**metadata["properties"], **self.metadata_mappings) mappings["properties"]["metadata"] = {"properties": metadata} await self.retrieval_strategy.before_index_creation( client=self.client, text_field=self.text_field, vector_field=self.vector_field, ) await self.client.indices.create( index=self.index, mappings=mappings, settings=settings ) async def max_marginal_relevance_search( self, *, query: Optional[str] = None, query_embedding: Optional[List[float]] = None, embedding_service: Optional[AsyncEmbeddingService] = None, vector_field: str, k: int = 4, num_candidates: int = 20, lambda_mult: float = 0.5, fields: Optional[List[str]] = None, custom_query: Optional[ Callable[[Dict[str, Any], Optional[str]], Dict[str, Any]] ] = None, ) -> List[Dict[str, Any]]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. :param query (str): Text to look up documents similar to. :param query_embedding: Input embedding vector. If given, input query string is ignored. :param k (int): Number of Documents to return. Defaults to 4. :param fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. :param lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. 
:param fields: Other fields to get from elasticsearch source. These fields will be added to the document metadata. :return: A list of Documents selected by maximal marginal relevance. """ remove_vector_query_field_from_metadata = True if fields is None: fields = [vector_field] elif vector_field not in fields: fields.append(vector_field) else: remove_vector_query_field_from_metadata = False # Embed the query if query_embedding: query_vector = query_embedding else: if not query: raise ValueError("specify either query or query_embedding to search") elif embedding_service: query_vector = await embedding_service.embed_query(query) elif self.embedding_service: query_vector = await self.embedding_service.embed_query(query) else: raise ValueError("specify embedding_service to search with query") # Fetch the initial documents got_hits = await self.search( query=None, query_vector=query_vector, k=num_candidates, fields=fields, custom_query=custom_query, ) # Get the embeddings for the fetched documents got_embeddings = [hit["_source"][vector_field] for hit in got_hits] # Select documents using maximal marginal relevance selected_indices = maximal_marginal_relevance( query_vector, got_embeddings, lambda_mult=lambda_mult, k=k ) selected_hits = [got_hits[i] for i in selected_indices] if remove_vector_query_field_from_metadata: for hit in selected_hits: del hit["_source"][vector_field] return selected_hits python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_sync/000077500000000000000000000000001506101734100256175ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_sync/__init__.py000066400000000000000000000014231506101734100277300ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_sync/_utils.py000066400000000000000000000030411506101734100274660ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from elasticsearch import BadRequestError, Elasticsearch, NotFoundError def model_must_be_deployed(client: Elasticsearch, model_id: str) -> None: """ :raises [NotFoundError]: if the model is neither downloaded nor deployed. :raises [ConflictError]: if the model is downloaded but not yet deployed. """ doc = {"text_field": f"test if the model '{model_id}' is deployed"} try: client.ml.infer_trained_model(model_id=model_id, docs=[doc]) except BadRequestError: # The model is deployed but expects a different input field name. pass def model_is_deployed(client: Elasticsearch, model_id: str) -> bool: try: model_must_be_deployed(client, model_id) return True except NotFoundError: return False python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_sync/embedding_service.py000066400000000000000000000067761506101734100316470ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from abc import ABC, abstractmethod from typing import List from elasticsearch import Elasticsearch from elasticsearch._version import __versionstr__ as lib_version class EmbeddingService(ABC): @abstractmethod def embed_documents(self, texts: List[str]) -> List[List[float]]: """Generate embeddings for a list of documents. :param texts: A list of document strings to generate embeddings for. :return: A list of embeddings, one for each document in the input. """ @abstractmethod def embed_query(self, query: str) -> List[float]: """Generate an embedding for a single query text. :param text: The query text to generate an embedding for. :return: The embedding for the input query text. """ class ElasticsearchEmbeddings(EmbeddingService): """Elasticsearch as a service for embedding model inference. You need to have an embedding model downloaded and deployed in Elasticsearch: - https://www.elastic.co/guide/en/elasticsearch/reference/current/infer-trained-model.html - https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-deploy-models.html """ # noqa: E501 def __init__( self, *, client: Elasticsearch, model_id: str, input_field: str = "text_field", user_agent: str = f"elasticsearch-py-es/{lib_version}", ): """ :param agent_header: user agent header specific to the 3rd party integration. Used for usage tracking in Elastic Cloud. :param model_id: The model_id of the model deployed in the Elasticsearch cluster. :param input_field: The name of the key for the input text field in the document. Defaults to 'text_field'. :param client: Elasticsearch client connection. Alternatively specify the Elasticsearch connection with the other es_* parameters. """ # Add integration-specific usage header for tracking usage in Elastic Cloud. # client.options preserves existing (non-user-agent) headers. 
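        # With the default user_agent this results in a header like
        # "User-Agent: elasticsearch-py-es/<library version>" (illustrative form).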
client = client.options(headers={"User-Agent": user_agent}) self.client = client self.model_id = model_id self.input_field = input_field def embed_documents(self, texts: List[str]) -> List[List[float]]: return self._embedding_func(texts) def embed_query(self, text: str) -> List[float]: result = self._embedding_func([text]) return result[0] def _embedding_func(self, texts: List[str]) -> List[List[float]]: response = self.client.ml.infer_trained_model( model_id=self.model_id, docs=[{self.input_field: text} for text in texts] ) return [doc["predicted_value"] for doc in response["inference_results"]] python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_sync/strategies.py000066400000000000000000000373131506101734100303520ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from abc import ABC, abstractmethod from typing import Any, Dict, List, Optional, Tuple, Union, cast from elasticsearch import Elasticsearch from elasticsearch.helpers.vectorstore._sync._utils import model_must_be_deployed from elasticsearch.helpers.vectorstore._utils import DistanceMetric class RetrievalStrategy(ABC): @abstractmethod def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: """ Returns the Elasticsearch query body for the given parameters. The store will execute the query. :param query: The text query. Can be None if query_vector is given. :param k: The total number of results to retrieve. :param num_candidates: The number of results to fetch initially in knn search. :param filter: List of filter clauses to apply to the query. :param query_vector: The query vector. Can be None if a query string is given. :return: The Elasticsearch query body. """ @abstractmethod def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: """ Create the required index and do necessary preliminary work, like creating inference pipelines or checking if a required model was deployed. :param client: Elasticsearch client connection. :param text_field: The field containing the text data in the index. :param vector_field: The field containing the vector representations in the index. :param num_dimensions: If vectors are indexed, how many dimensions do they have. :return: Dictionary with field and field type pairs that describe the schema. """ def before_index_creation( self, *, client: Elasticsearch, text_field: str, vector_field: str ) -> None: """ Executes before the index is created. Used for setting up any required Elasticsearch resources like a pipeline. Defaults to a no-op. :param client: The Elasticsearch client. 
:param text_field: The field containing the text data in the index. :param vector_field: The field containing the vector representations in the index. """ pass def needs_inference(self) -> bool: """ Some retrieval strategies index embedding vectors and allow search by embedding vector, for example the `DenseVectorStrategy` strategy. Mapping a user input query string to an embedding vector is called inference. Inference can be applied in Elasticsearch (using a `model_id`) or outside of Elasticsearch (using an `EmbeddingService` defined on the `VectorStore`). In the latter case, this method has to return True. """ return False class SparseVectorStrategy(RetrievalStrategy): """Sparse retrieval strategy using the `sparse_vector` processor.""" def __init__(self, model_id: str = ".elser_model_2"): self.model_id = model_id self._tokens_field = "tokens" self._pipeline_name = f"{self.model_id}_sparse_embedding" def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: if query_vector: raise ValueError( "Cannot do sparse retrieval with a query_vector. " "Inference is currently always applied in Elasticsearch." ) if query is None: raise ValueError("please specify a query string") return { "query": { "bool": { "must": [ { "sparse_vector": { "field": f"{vector_field}.{self._tokens_field}", "inference_id": self.model_id, "query": query, } } ], "filter": filter, } } } def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: mappings: Dict[str, Any] = { "properties": { vector_field: { "properties": {self._tokens_field: {"type": "sparse_vector"}} } } } settings = {"default_pipeline": self._pipeline_name} return mappings, settings def before_index_creation( self, *, client: Elasticsearch, text_field: str, vector_field: str ) -> None: if self.model_id: model_must_be_deployed(client, self.model_id) # Create a pipeline for the model client.ingest.put_pipeline( id=self._pipeline_name, description="Embedding pipeline for Python VectorStore", processors=[ { "inference": { "model_id": self.model_id, "input_output": [ { "input_field": text_field, "output_field": f"{vector_field}.{self._tokens_field}", }, ], } } ], ) class DenseVectorStrategy(RetrievalStrategy): """K-nearest-neighbors retrieval.""" def __init__( self, *, distance: DistanceMetric = DistanceMetric.COSINE, model_id: Optional[str] = None, hybrid: bool = False, rrf: Union[bool, Dict[str, Any]] = True, text_field: Optional[str] = "text_field", ): if hybrid and not text_field: raise ValueError( "to enable hybrid you have to specify a text_field (for BM25Strategy matching)" ) self.distance = distance self.model_id = model_id self.hybrid = hybrid self.rrf = rrf self.text_field = text_field def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: knn = { "filter": filter, "field": vector_field, "k": k, "num_candidates": num_candidates, } if query_vector is not None: knn["query_vector"] = query_vector else: # Inference in Elasticsearch. When initializing we make sure to always have # a model_id if don't have an embedding_service. 
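            # The query_vector_builder below asks Elasticsearch to embed the query
            # text server-side with the deployed model, instead of embedding it
            # client-side.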
knn["query_vector_builder"] = { "text_embedding": { "model_id": self.model_id, "model_text": query, } } if self.hybrid: return self._hybrid(query=cast(str, query), knn=knn, filter=filter) return {"knn": knn} def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: if self.distance is DistanceMetric.COSINE: similarity = "cosine" elif self.distance is DistanceMetric.EUCLIDEAN_DISTANCE: similarity = "l2_norm" elif self.distance is DistanceMetric.DOT_PRODUCT: similarity = "dot_product" elif self.distance is DistanceMetric.MAX_INNER_PRODUCT: similarity = "max_inner_product" else: raise ValueError(f"Similarity {self.distance} not supported.") mappings: Dict[str, Any] = { "properties": { vector_field: { "type": "dense_vector", "dims": num_dimensions, "index": True, "similarity": similarity, }, } } return mappings, {} def before_index_creation( self, *, client: Elasticsearch, text_field: str, vector_field: str ) -> None: if self.model_id: model_must_be_deployed(client, self.model_id) def _hybrid( self, query: str, knn: Dict[str, Any], filter: List[Dict[str, Any]] ) -> Dict[str, Any]: # Add a query to the knn query. # RRF is used to even the score from the knn query and text query # RRF has two optional parameters: {'rank_constant':int, 'rank_window_size':int} # https://www.elastic.co/guide/en/elasticsearch/reference/current/rrf.html standard_query = { "query": { "bool": { "must": [ { "match": { self.text_field: { "query": query, } } } ], "filter": filter, } } } if self.rrf is False: query_body = { "knn": knn, **standard_query, } else: rrf_options = {} if isinstance(self.rrf, Dict): if "rank_constant" in self.rrf: rrf_options["rank_constant"] = self.rrf["rank_constant"] if "window_size" in self.rrf: # 'window_size' was renamed to 'rank_window_size', but we support # the older name for backwards compatibility rrf_options["rank_window_size"] = self.rrf["window_size"] if "rank_window_size" in self.rrf: rrf_options["rank_window_size"] = self.rrf["rank_window_size"] query_body = { "retriever": { "rrf": { "retrievers": [ {"standard": standard_query}, {"knn": knn}, ], **rrf_options, }, }, } return query_body def needs_inference(self) -> bool: return not self.model_id class DenseVectorScriptScoreStrategy(RetrievalStrategy): """Exact nearest neighbors retrieval using the `script_score` query.""" def __init__(self, distance: DistanceMetric = DistanceMetric.COSINE) -> None: self.distance = distance def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: if not query_vector: raise ValueError("specify a query_vector") if self.distance is DistanceMetric.COSINE: similarity_algo = ( f"cosineSimilarity(params.query_vector, '{vector_field}') + 1.0" ) elif self.distance is DistanceMetric.EUCLIDEAN_DISTANCE: similarity_algo = f"1 / (1 + l2norm(params.query_vector, '{vector_field}'))" elif self.distance is DistanceMetric.DOT_PRODUCT: similarity_algo = f""" double value = dotProduct(params.query_vector, '{vector_field}'); return sigmoid(1, Math.E, -value); """ elif self.distance is DistanceMetric.MAX_INNER_PRODUCT: similarity_algo = f""" double value = dotProduct(params.query_vector, '{vector_field}'); if (dotProduct < 0) {{ return 1 / (1 + -1 * dotProduct); }} return dotProduct + 1; """ else: raise ValueError(f"Similarity {self.distance} not supported.") query_bool: Dict[str, Any] = 
{"match_all": {}} if filter: query_bool = {"bool": {"filter": filter}} return { "query": { "script_score": { "query": query_bool, "script": { "source": similarity_algo, "params": {"query_vector": query_vector}, }, }, } } def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: mappings = { "properties": { vector_field: { "type": "dense_vector", "dims": num_dimensions, "index": False, } } } return mappings, {} def needs_inference(self) -> bool: return True class BM25Strategy(RetrievalStrategy): def __init__( self, k1: Optional[float] = None, b: Optional[float] = None, ): self.k1 = k1 self.b = b def es_query( self, *, query: Optional[str], query_vector: Optional[List[float]], text_field: str, vector_field: str, k: int, num_candidates: int, filter: List[Dict[str, Any]] = [], ) -> Dict[str, Any]: return { "query": { "bool": { "must": [ { "match": { text_field: { "query": query, } }, }, ], "filter": filter, }, }, } def es_mappings_settings( self, *, text_field: str, vector_field: str, num_dimensions: Optional[int], ) -> Tuple[Dict[str, Any], Dict[str, Any]]: similarity_name = "custom_bm25" mappings: Dict[str, Any] = { "properties": { text_field: { "type": "text", "similarity": similarity_name, }, }, } bm25: Dict[str, Any] = { "type": "BM25", } if self.k1 is not None: bm25["k1"] = self.k1 if self.b is not None: bm25["b"] = self.b settings = { "similarity": { similarity_name: bm25, } } return mappings, settings python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_sync/vectorstore.py000066400000000000000000000402131506101734100305500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import uuid from typing import Any, Callable, Dict, List, Optional from elasticsearch import Elasticsearch from elasticsearch._version import __versionstr__ as lib_version from elasticsearch.helpers import BulkIndexError, bulk from elasticsearch.helpers.vectorstore import ( EmbeddingService, RetrievalStrategy, ) from elasticsearch.helpers.vectorstore._utils import maximal_marginal_relevance logger = logging.getLogger(__name__) class VectorStore: """ VectorStore is a higher-level abstraction of indexing and search. Users can pick from available retrieval strategies. Documents have up to 3 fields: - text_field: the text to be indexed and searched. - metadata: additional information about the document, either schema-free or defined by the supplied metadata_mappings. - vector_field (usually not filled by the user): the embedding vector of the text. Depending on the strategy, vector embeddings are - created by the user beforehand - created by this AsyncVectorStore class in Python - created in-stack by inference pipelines. 
""" def __init__( self, client: Elasticsearch, *, index: str, retrieval_strategy: RetrievalStrategy, embedding_service: Optional[EmbeddingService] = None, num_dimensions: Optional[int] = None, text_field: str = "text_field", vector_field: str = "vector_field", metadata_mappings: Optional[Dict[str, Any]] = None, user_agent: str = f"elasticsearch-py-vs/{lib_version}", custom_index_settings: Optional[Dict[str, Any]] = None, ) -> None: """ :param user_header: user agent header specific to the 3rd party integration. Used for usage tracking in Elastic Cloud. :param index: The name of the index to query. :param retrieval_strategy: how to index and search the data. See the strategies module for availble strategies. :param text_field: Name of the field with the textual data. :param vector_field: For strategies that perform embedding inference in Python, the embedding vector goes in this field. :param client: Elasticsearch client connection. Alternatively specify the Elasticsearch connection with the other es_* parameters. :param custom_index_settings: A dictionary of custom settings for the index. This can include configurations like the number of shards, number of replicas, analysis settings, and other index-specific settings. If not provided, default settings will be used. Note that if the same setting is provided by both the user and the strategy, will raise an error. """ # Add integration-specific usage header for tracking usage in Elastic Cloud. # client.options preserves existing (non-user-agent) headers. client = client.options(headers={"User-Agent": user_agent}) if hasattr(retrieval_strategy, "text_field"): retrieval_strategy.text_field = text_field if hasattr(retrieval_strategy, "vector_field"): retrieval_strategy.vector_field = vector_field self.client = client self.index = index self.retrieval_strategy = retrieval_strategy self.embedding_service = embedding_service self.num_dimensions = num_dimensions self.text_field = text_field self.vector_field = vector_field self.metadata_mappings = metadata_mappings self.custom_index_settings = custom_index_settings def close(self) -> None: return self.client.close() def add_texts( self, texts: List[str], *, metadatas: Optional[List[Dict[str, Any]]] = None, vectors: Optional[List[List[float]]] = None, ids: Optional[List[str]] = None, refresh_indices: bool = True, create_index_if_not_exists: bool = True, bulk_kwargs: Optional[Dict[str, Any]] = None, ) -> List[str]: """Add documents to the Elasticsearch index. :param texts: List of text documents. :param metadata: Optional list of document metadata. Must be of same length as texts. :param vectors: Optional list of embedding vectors. Must be of same length as texts. :param ids: Optional list of ID strings. Must be of same length as texts. :param refresh_indices: Whether to refresh the index after deleting documents. Defaults to True. :param create_index_if_not_exists: Whether to create the index if it does not exist. Defaults to True. :param bulk_kwargs: Arguments to pass to the bulk function when indexing (for example chunk_size). :return: List of IDs of the created documents, either echoing the provided one or returning newly created ones. 
""" bulk_kwargs = bulk_kwargs or {} ids = ids or [str(uuid.uuid4()) for _ in texts] requests = [] if create_index_if_not_exists: self._create_index_if_not_exists() if self.embedding_service and not vectors: vectors = self.embedding_service.embed_documents(texts) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} request: Dict[str, Any] = { "_op_type": "index", "_index": self.index, self.text_field: text, "metadata": metadata, "_id": ids[i], } if vectors: request[self.vector_field] = vectors[i] requests.append(request) if len(requests) > 0: try: success, failed = bulk( self.client, requests, stats_only=True, refresh=refresh_indices, **bulk_kwargs, ) logger.debug(f"added texts {ids} to index") return ids except BulkIndexError as e: logger.error(f"Error adding texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e else: logger.debug("No texts to add to index") return [] def delete( # type: ignore[no-untyped-def] self, *, ids: Optional[List[str]] = None, query: Optional[Dict[str, Any]] = None, refresh_indices: bool = True, **delete_kwargs, ) -> bool: """Delete documents from the Elasticsearch index. :param ids: List of IDs of documents to delete. :param refresh_indices: Whether to refresh the index after deleting documents. Defaults to True. :return: True if deletion was successful. """ if ids is not None and query is not None: raise ValueError("one of ids or query must be specified") elif ids is None and query is None: raise ValueError("either specify ids or query") try: if ids: body = [ {"_op_type": "delete", "_index": self.index, "_id": _id} for _id in ids ] bulk( self.client, body, refresh=refresh_indices, ignore_status=404, **delete_kwargs, ) logger.debug(f"Deleted {len(body)} texts from index") else: self.client.delete_by_query( index=self.index, query=query, refresh=refresh_indices, **delete_kwargs, ) except BulkIndexError as e: logger.error(f"Error deleting texts: {e}") firstError = e.errors[0].get("index", {}).get("error", {}) logger.error(f"First error reason: {firstError.get('reason')}") raise e return True def search( self, *, query: Optional[str] = None, query_vector: Optional[List[float]] = None, k: int = 4, num_candidates: int = 50, fields: Optional[List[str]] = None, filter: Optional[List[Dict[str, Any]]] = None, custom_query: Optional[ Callable[[Dict[str, Any], Optional[str]], Dict[str, Any]] ] = None, ) -> List[Dict[str, Any]]: """ :param query: Input query string. :param query_vector: Input embedding vector. If given, input query string is ignored. :param k: Number of returned results. :param num_candidates: Number of candidates to fetch from data nodes in knn. :param fields: List of field names to return. :param filter: Elasticsearch filters to apply. :param custom_query: Function to modify the Elasticsearch query body before it is sent to Elasticsearch. :return: List of document hits. Includes _index, _id, _score and _source. 
""" if fields is None: fields = [] if "metadata" not in fields: fields.append("metadata") if self.text_field not in fields: fields.append(self.text_field) if self.embedding_service and not query_vector: if not query: raise ValueError("specify a query or a query_vector to search") query_vector = self.embedding_service.embed_query(query) query_body = self.retrieval_strategy.es_query( query=query, query_vector=query_vector, text_field=self.text_field, vector_field=self.vector_field, k=k, num_candidates=num_candidates, filter=filter or [], ) if custom_query is not None: query_body = custom_query(query_body, query) logger.debug(f"Calling custom_query, Query body now: {query_body}") response = self.client.search( index=self.index, **query_body, size=k, source=True, source_includes=fields, ) hits: List[Dict[str, Any]] = response["hits"]["hits"] return hits def _create_index_if_not_exists(self) -> None: exists = self.client.indices.exists(index=self.index) if exists.meta.status == 200: logger.debug(f"Index {self.index} already exists. Skipping creation.") return if self.retrieval_strategy.needs_inference(): if not self.num_dimensions and not self.embedding_service: raise ValueError( "retrieval strategy requires embeddings; either embedding_service " "or num_dimensions need to be specified" ) if not self.num_dimensions and self.embedding_service: vector = self.embedding_service.embed_query("get num dimensions") self.num_dimensions = len(vector) mappings, settings = self.retrieval_strategy.es_mappings_settings( text_field=self.text_field, vector_field=self.vector_field, num_dimensions=self.num_dimensions, ) if self.custom_index_settings: conflicting_keys = set(self.custom_index_settings.keys()) & set( settings.keys() ) if conflicting_keys: raise ValueError(f"Conflicting settings: {conflicting_keys}") else: settings.update(self.custom_index_settings) if self.metadata_mappings: metadata = mappings["properties"].get("metadata", {"properties": {}}) for key in self.metadata_mappings.keys(): if key in metadata: raise ValueError(f"metadata key {key} already exists in mappings") metadata = dict(**metadata["properties"], **self.metadata_mappings) mappings["properties"]["metadata"] = {"properties": metadata} self.retrieval_strategy.before_index_creation( client=self.client, text_field=self.text_field, vector_field=self.vector_field, ) self.client.indices.create( index=self.index, mappings=mappings, settings=settings ) def max_marginal_relevance_search( self, *, query: Optional[str] = None, query_embedding: Optional[List[float]] = None, embedding_service: Optional[EmbeddingService] = None, vector_field: str, k: int = 4, num_candidates: int = 20, lambda_mult: float = 0.5, fields: Optional[List[str]] = None, custom_query: Optional[ Callable[[Dict[str, Any], Optional[str]], Dict[str, Any]] ] = None, ) -> List[Dict[str, Any]]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. :param query (str): Text to look up documents similar to. :param query_embedding: Input embedding vector. If given, input query string is ignored. :param k (int): Number of Documents to return. Defaults to 4. :param fetch_k (int): Number of Documents to fetch to pass to MMR algorithm. :param lambda_mult (float): Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. 
:param fields: Other fields to get from elasticsearch source. These fields will be added to the document metadata. :return: A list of Documents selected by maximal marginal relevance. """ remove_vector_query_field_from_metadata = True if fields is None: fields = [vector_field] elif vector_field not in fields: fields.append(vector_field) else: remove_vector_query_field_from_metadata = False # Embed the query if query_embedding: query_vector = query_embedding else: if not query: raise ValueError("specify either query or query_embedding to search") elif embedding_service: query_vector = embedding_service.embed_query(query) elif self.embedding_service: query_vector = self.embedding_service.embed_query(query) else: raise ValueError("specify embedding_service to search with query") # Fetch the initial documents got_hits = self.search( query=None, query_vector=query_vector, k=num_candidates, fields=fields, custom_query=custom_query, ) # Get the embeddings for the fetched documents got_embeddings = [hit["_source"][vector_field] for hit in got_hits] # Select documents using maximal marginal relevance selected_indices = maximal_marginal_relevance( query_vector, got_embeddings, lambda_mult=lambda_mult, k=k ) selected_hits = [got_hits[i] for i in selected_indices] if remove_vector_query_field_from_metadata: for hit in selected_hits: del hit["_source"][vector_field] return selected_hits python-elasticsearch-9.1.1/elasticsearch/helpers/vectorstore/_utils.py000066400000000000000000000076211506101734100263630ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
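
# The sketch below is illustrative only and is not part of the library API: it
# shows how maximal_marginal_relevance() (defined further down) re-ranks a set
# of candidate vectors. The vectors are made-up toy data, and running it
# requires the optional numpy/simsimd dependencies ("vectorstore_mmr" extra).
def _mmr_usage_sketch() -> None:
    query = [1.0, 0.0]
    candidates = [
        [1.0, 0.0],  # exact match for the query
        [0.99, 0.01],  # near-duplicate of the first candidate
        [0.6, 0.8],  # less similar to the query, but diverse
    ]
    # A low lambda_mult weights diversity over similarity to the query, so the
    # near-duplicate is expected to be skipped in favor of the diverse vector.
    print(maximal_marginal_relevance(query, candidates, lambda_mult=0.3, k=2))
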
from enum import Enum from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: import numpy as np import numpy.typing as npt Matrix = Union[ List[List[float]], List["npt.NDArray[np.float64]"], "npt.NDArray[np.float64]" ] class DistanceMetric(str, Enum): """Enumerator of all Elasticsearch dense vector distance metrics.""" COSINE = "COSINE" DOT_PRODUCT = "DOT_PRODUCT" EUCLIDEAN_DISTANCE = "EUCLIDEAN_DISTANCE" MAX_INNER_PRODUCT = "MAX_INNER_PRODUCT" def maximal_marginal_relevance( query_embedding: List[float], embedding_list: List[List[float]], lambda_mult: float = 0.5, k: int = 4, ) -> List[int]: """Calculate maximal marginal relevance.""" try: import numpy as np except ModuleNotFoundError as e: _raise_missing_mmr_deps_error(e) query_embedding_arr = np.array(query_embedding) if min(k, len(embedding_list)) <= 0: return [] if query_embedding_arr.ndim == 1: query_embedding_arr = np.expand_dims(query_embedding_arr, axis=0) similarity_to_query = _cosine_similarity(query_embedding_arr, embedding_list)[0] most_similar = int(np.argmax(similarity_to_query)) idxs = [most_similar] selected = np.array([embedding_list[most_similar]]) while len(idxs) < min(k, len(embedding_list)): best_score = -np.inf idx_to_add = -1 similarity_to_selected = _cosine_similarity(embedding_list, selected) for i, query_score in enumerate(similarity_to_query): if i in idxs: continue redundant_score = max(similarity_to_selected[i]) equation_score = ( lambda_mult * query_score - (1 - lambda_mult) * redundant_score ) if equation_score > best_score: best_score = equation_score idx_to_add = i idxs.append(idx_to_add) selected = np.append(selected, [embedding_list[idx_to_add]], axis=0) return idxs def _cosine_similarity(X: Matrix, Y: Matrix) -> "npt.NDArray[np.float64]": """Row-wise cosine similarity between two equal-width matrices.""" try: import numpy as np import simsimd as simd except ModuleNotFoundError as e: _raise_missing_mmr_deps_error(e) if len(X) == 0 or len(Y) == 0: return np.array([]) X = np.array(X) Y = np.array(Y) if X.shape[1] != Y.shape[1]: raise ValueError( f"Number of columns in X and Y must be the same. X has shape {X.shape} " f"and Y has shape {Y.shape}." ) X = np.array(X, dtype=np.float32) Y = np.array(Y, dtype=np.float32) Z = 1 - np.array(simd.cdist(X, Y, metric="cosine")) if isinstance(Z, float): return np.array([Z]) return np.array(Z) def _raise_missing_mmr_deps_error(parent_error: ModuleNotFoundError) -> None: import sys raise ModuleNotFoundError( f"Failed to compute maximal marginal relevance because the required " f"module '{parent_error.name}' is missing. You can install it by running: " f"'{sys.executable} -m pip install elasticsearch[vectorstore_mmr]'" ) from parent_error python-elasticsearch-9.1.1/elasticsearch/py.typed000066400000000000000000000000001506101734100221500ustar00rootroot00000000000000python-elasticsearch-9.1.1/elasticsearch/serializer.py000066400000000000000000000175731506101734100232230ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import uuid from datetime import date, datetime from decimal import Decimal from typing import Any, ClassVar, Dict, Tuple from elastic_transport import JsonSerializer as _JsonSerializer from elastic_transport import NdjsonSerializer as _NdjsonSerializer from elastic_transport import Serializer as Serializer from elastic_transport import TextSerializer as TextSerializer from .exceptions import SerializationError INTEGER_TYPES = () FLOAT_TYPES = (Decimal,) TIME_TYPES = (date, datetime) __all__ = [ "Serializer", "JsonSerializer", "TextSerializer", "NdjsonSerializer", "CompatibilityModeJsonSerializer", "CompatibilityModeNdjsonSerializer", "MapboxVectorTileSerializer", ] try: from elastic_transport import OrjsonSerializer as _OrjsonSerializer __all__.append("OrjsonSerializer") except ImportError: _OrjsonSerializer = None # type: ignore[assignment,misc] try: import pyarrow as pa __all__.append("PyArrowSerializer") except ImportError: pa = None class JsonSerializer(_JsonSerializer): mimetype: ClassVar[str] = "application/json" def default(self, data: Any) -> Any: if isinstance(data, TIME_TYPES): # Little hack to avoid importing pandas but to not # return 'NaT' string for pd.NaT as that's not a valid # Elasticsearch date. formatted_data = data.isoformat() if formatted_data != "NaT": return formatted_data if isinstance(data, uuid.UUID): return str(data) elif isinstance(data, FLOAT_TYPES): return float(data) # This is kept for backwards compatibility even # if 'INTEGER_TYPES' isn't used by default anymore. elif INTEGER_TYPES and isinstance(data, INTEGER_TYPES): return int(data) # Special cases for numpy and pandas types # These are expensive to import so we try them last. 
serialized, value = _attempt_serialize_numpy_or_pandas(data) if serialized: return value raise TypeError(f"Unable to serialize {data!r} (type: {type(data)})") if _OrjsonSerializer is not None: class OrjsonSerializer(JsonSerializer, _OrjsonSerializer): def default(self, data: Any) -> Any: return JsonSerializer.default(self, data) class NdjsonSerializer(JsonSerializer, _NdjsonSerializer): mimetype: ClassVar[str] = "application/x-ndjson" def default(self, data: Any) -> Any: return JsonSerializer.default(self, data) class CompatibilityModeJsonSerializer(JsonSerializer): mimetype: ClassVar[str] = "application/vnd.elasticsearch+json" class CompatibilityModeNdjsonSerializer(NdjsonSerializer): mimetype: ClassVar[str] = "application/vnd.elasticsearch+x-ndjson" class MapboxVectorTileSerializer(Serializer): mimetype: ClassVar[str] = "application/vnd.mapbox-vector-tile" def loads(self, data: bytes) -> bytes: return data def dumps(self, data: bytes) -> bytes: if isinstance(data, bytes): return data raise SerializationError(f"Cannot serialize {data!r} into a MapBox vector tile") if pa is not None: class PyArrowSerializer(Serializer): """PyArrow serializer for deserializing Arrow Stream data.""" mimetype: ClassVar[str] = "application/vnd.apache.arrow.stream" def loads(self, data: bytes) -> pa.Table: try: with pa.ipc.open_stream(data) as reader: return reader.read_all() except pa.ArrowException as e: raise SerializationError( message=f"Unable to deserialize as Arrow stream: {data!r}", errors=(e,), ) def dumps(self, data: Any) -> bytes: raise SerializationError( message="Elasticsearch does not accept Arrow input data" ) DEFAULT_SERIALIZERS: Dict[str, Serializer] = { JsonSerializer.mimetype: JsonSerializer(), MapboxVectorTileSerializer.mimetype: MapboxVectorTileSerializer(), NdjsonSerializer.mimetype: NdjsonSerializer(), CompatibilityModeJsonSerializer.mimetype: CompatibilityModeJsonSerializer(), CompatibilityModeNdjsonSerializer.mimetype: CompatibilityModeNdjsonSerializer(), } if pa is not None: DEFAULT_SERIALIZERS[PyArrowSerializer.mimetype] = PyArrowSerializer() # Alias for backwards compatibility JSONSerializer = JsonSerializer def _attempt_serialize_numpy_or_pandas(data: Any) -> Tuple[bool, Any]: """Attempts to serialize a value from the numpy or pandas libraries. This function is separate from JSONSerializer because the inner functions are rewritten to be no-ops if either library isn't available to avoid attempting to import and raising an ImportError over and over again. Returns a tuple of (bool, Any) where the bool corresponds to whether the second value contains a properly serialized value and thus should be returned by JSONSerializer.default(). """ serialized, value = _attempt_serialize_numpy(data) if serialized: return serialized, value serialized, value = _attempt_serialize_pandas(data) if serialized: return serialized, value return False, None def _attempt_serialize_numpy(data: Any) -> Tuple[bool, Any]: global _attempt_serialize_numpy try: import numpy as np if isinstance( data, ( np.int_, np.intc, np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, ), ): return True, int(data) elif isinstance( data, ( np.float16, np.float32, np.float64, ), ): return True, float(data) elif isinstance(data, np.bool_): return True, bool(data) elif isinstance(data, np.datetime64): return True, data.item().isoformat() elif isinstance(data, np.ndarray): return True, data.tolist() except ImportError: # Since we failed to import 'numpy' we don't want to try again. 
        _attempt_serialize_numpy = _attempt_serialize_noop

    return False, None


def _attempt_serialize_pandas(data: Any) -> Tuple[bool, Any]:
    global _attempt_serialize_pandas

    try:
        import pandas as pd

        if isinstance(data, (pd.Series, pd.Categorical)):
            return True, data.tolist()
        elif isinstance(data, pd.Timestamp) and data is not getattr(pd, "NaT", None):
            return True, data.isoformat()
        elif data is getattr(pd, "NA", None):
            return True, None
    except ImportError:
        # Since we failed to import 'pandas' we don't want to try again.
        _attempt_serialize_pandas = _attempt_serialize_noop

    return False, None


def _attempt_serialize_noop(data: Any) -> Tuple[bool, Any]:  # noqa
    # Short-circuit if the above functions can't import
    # the corresponding library on the first attempt.
    return False, None
python-elasticsearch-9.1.1/examples/000077500000000000000000000000001506101734100174675ustar00rootroot00000000000000python-elasticsearch-9.1.1/examples/bulk-ingest/000077500000000000000000000000001506101734100217135ustar00rootroot00000000000000python-elasticsearch-9.1.1/examples/bulk-ingest/README.md000066400000000000000000000021221506101734100231700ustar00rootroot00000000000000# Bulk Ingest

A simple script that shows how to ingest a dataset from a file into
Elasticsearch. The file that is used for this example is a `.csv`, so each row
is turned into a document.

To run this example install the dependencies with `pip`:

```console
python -m pip install -r requirements.txt
```

and then run the script with Python:

```console
python bulk-ingest.py
```

You should see the script downloading the dataset into `nyc-restaurants.csv`.

Once all the data is loaded into Elasticsearch you can run queries on the
dataset or create visualizations within Kibana.

```python
import elasticsearch

client = elasticsearch.Elasticsearch()

resp = client.search(
    index="nyc-restaurants",
    size=0,
    aggs={
        "borough": {
            "terms": {"field": "borough"},
            "aggs": {"grades": {"terms": {"field": "grade"}}},
        }
    },
)
print(resp)
```
python-elasticsearch-9.1.1/examples/bulk-ingest/bulk-ingest.py000066400000000000000000000064471506101734100245160ustar00rootroot00000000000000#!/usr/bin/env python
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information

"""Script that downloads a public dataset and streams it to an Elasticsearch cluster"""

import csv
from os.path import abspath, join, dirname, exists

import tqdm
import urllib3

from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk

NYC_RESTAURANTS = (
    "https://data.cityofnewyork.us/api/views/43nn-pn8j/rows.csv?accessType=DOWNLOAD"
)
DATASET_PATH = join(dirname(abspath(__file__)), "nyc-restaurants.csv")
CHUNK_SIZE = 16384


def download_dataset():
    """Downloads the public dataset if it is not already present locally
    and returns the number of rows in the .csv file.
""" if not exists(DATASET_PATH): http = urllib3.PoolManager() resp = http.request("GET", NYC_RESTAURANTS, preload_content=False) if resp.status != 200: raise RuntimeError("Could not download dataset") with open(DATASET_PATH, mode="wb") as f: chunk = resp.read(CHUNK_SIZE) while chunk: f.write(chunk) chunk = resp.read(CHUNK_SIZE) with open(DATASET_PATH) as f: return sum([1 for _ in f]) - 1 def create_index(client): """Creates an index in Elasticsearch if one isn't already there.""" client.indices.create( index="nyc-restaurants", body={ "settings": {"number_of_shards": 1}, "mappings": { "properties": { "name": {"type": "text"}, "borough": {"type": "keyword"}, "cuisine": {"type": "keyword"}, "grade": {"type": "keyword"}, "location": {"type": "geo_point"}, } }, }, ignore=400, ) def generate_actions(): """Reads the file through csv.DictReader() and for each row yields a single document. This function is passed into the bulk() helper to create many documents in sequence. """ with open(DATASET_PATH) as f: reader = csv.DictReader(f) for row in reader: doc = { "_id": row["CAMIS"], "name": row["DBA"], "borough": row["BORO"], "cuisine": row["CUISINE DESCRIPTION"], "grade": row["GRADE"] or None, } lat = row["Latitude"] lon = row["Longitude"] if lat not in ("", "0") and lon not in ("", "0"): doc["location"] = {"lat": float(lat), "lon": float(lon)} yield doc def main(): print("Loading dataset...") number_of_docs = download_dataset() client = Elasticsearch( # Add your cluster configuration here! ) print("Creating an index...") create_index(client) print("Indexing documents...") progress = tqdm.tqdm(unit="docs", total=number_of_docs) successes = 0 for ok, action in streaming_bulk( client=client, index="nyc-restaurants", actions=generate_actions(), ): progress.update(1) successes += ok print("Indexed %d/%d documents" % (successes, number_of_docs)) if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/bulk-ingest/requirements.txt000066400000000000000000000000331506101734100251730ustar00rootroot00000000000000elasticsearch urllib3 tqdm python-elasticsearch-9.1.1/examples/dsl/000077500000000000000000000000001506101734100202515ustar00rootroot00000000000000python-elasticsearch-9.1.1/examples/dsl/README.rst000066400000000000000000000031301506101734100217350ustar00rootroot00000000000000Elasticsearch DSL Examples ========================== In this directory you can see several complete examples demonstrating key concepts and patterns exposed by ``elasticsearch-dsl``. ``alias_migration.py`` ---------------------- The alias migration example shows a useful pattern where we use versioned indices (``test-blog-0``, ``test-blog-1``, ...) to manage schema changes and hides that behind an alias so that the application doesn't have to be aware of the versions and just refer to the ``test-blog`` alias for both read and write operations. For simplicity we use a timestamp as version in the index name. ``parent_child.py`` ------------------- More complex example highlighting the possible relationships available in elasticsearch - `parent/child `_ and `nested `_. ``composite_agg.py`` -------------------- A helper function using the `composite aggregation `_ to paginate over aggregation results. ``percolate.py`` ---------------- A ``BlogPost`` document with automatic classification using the `percolator `_ functionality. ``completion.py`` ----------------- As example using `completion suggester `_ to auto complete people's names. 
python-elasticsearch-9.1.1/examples/dsl/alias_migration.py000066400000000000000000000127141506101734100237720ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Simple example with a single Document demonstrating how schema can be managed, including upgrading with reindexing. Key concepts: * setup() function to first initialize the schema (as index template) in elasticsearch. Can be called any time (recommended with every deploy of your app). * migrate() function to be called any time when the schema changes - it will create a new index (by incrementing the version) and update the alias. By default it will also (before flipping the alias) move the data from the previous index to the new one. * BlogPost._matches() class method is required for this code to work since otherwise BlogPost will not be used to deserialize the documents as those will have index set to the concrete index whereas the class refers to the alias. """ import os from datetime import datetime from fnmatch import fnmatch from typing import TYPE_CHECKING, Any, Dict, List, Optional from elasticsearch.dsl import Document, Keyword, connections, mapped_field ALIAS = "test-blog" PATTERN = ALIAS + "-*" PRIORITY = 100 class BlogPost(Document): if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: int title: str tags: List[str] = mapped_field(Keyword()) content: str published: Optional[datetime] = mapped_field(default=None) def is_published(self) -> bool: return bool(self.published and datetime.now() > self.published) @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: # override _matches to match indices in a pattern instead of just ALIAS # hit is the raw dict as returned by elasticsearch return fnmatch(hit["_index"], PATTERN) class Index: # we will use an alias instead of the index name = ALIAS # set settings and possibly other attributes of the index like # analyzers settings = {"number_of_shards": 1, "number_of_replicas": 0} def setup() -> None: """ Create the index template in elasticsearch specifying the mappings and any settings to be used. This can be run at any time, ideally at every new code deploy. """ # create an index template index_template = BlogPost._index.as_composable_template( ALIAS, PATTERN, priority=PRIORITY ) # upload the template into elasticsearch # potentially overriding the one already there index_template.save() # create the first index if it doesn't exist if not BlogPost._index.exists(): migrate(move_data=False) def migrate(move_data: bool = True, update_alias: bool = True) -> None: """ Upgrade function that creates a new index for the data. 
Optionally it also can (and by default will) reindex previous copy of the data into the new index (specify ``move_data=False`` to skip this step) and update the alias to point to the latest index (set ``update_alias=False`` to skip). Note that while this function is running the application can still perform any and all searches without any loss of functionality. It should, however, not perform any writes at this time as those might be lost. """ # construct a new index name by appending current timestamp next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f")) # get the low level connection es = connections.get_connection() # create new index, it will use the settings from the template es.indices.create(index=next_index) if move_data: # move data from current alias to the new index es.options(request_timeout=3600).reindex( body={"source": {"index": ALIAS}, "dest": {"index": next_index}} ) # refresh the index to make the changes visible es.indices.refresh(index=next_index) if update_alias: # repoint the alias to point to the newly created index es.indices.update_aliases( body={ "actions": [ {"remove": {"alias": ALIAS, "index": PATTERN}}, {"add": {"alias": ALIAS, "index": next_index}}, ] } ) def main() -> None: # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create the empty index setup() # create a new document bp = BlogPost( _id=0, title="Hello World!", tags=["testing", "dummy"], content=open(__file__).read(), ) bp.save(refresh=True) # create new index migrate() # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/async/000077500000000000000000000000001506101734100213665ustar00rootroot00000000000000python-elasticsearch-9.1.1/examples/dsl/async/alias_migration.py000066400000000000000000000131361506101734100251060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Simple example with a single Document demonstrating how schema can be managed, including upgrading with reindexing. Key concepts: * setup() function to first initialize the schema (as index template) in elasticsearch. Can be called any time (recommended with every deploy of your app). * migrate() function to be called any time when the schema changes - it will create a new index (by incrementing the version) and update the alias. By default it will also (before flipping the alias) move the data from the previous index to the new one. * BlogPost._matches() class method is required for this code to work since otherwise BlogPost will not be used to deserialize the documents as those will have index set to the concrete index whereas the class refers to the alias. 
""" import asyncio import os from datetime import datetime from fnmatch import fnmatch from typing import TYPE_CHECKING, Any, Dict, List, Optional from elasticsearch.dsl import AsyncDocument, Keyword, async_connections, mapped_field ALIAS = "test-blog" PATTERN = ALIAS + "-*" PRIORITY = 100 class BlogPost(AsyncDocument): if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: int title: str tags: List[str] = mapped_field(Keyword()) content: str published: Optional[datetime] = mapped_field(default=None) def is_published(self) -> bool: return bool(self.published and datetime.now() > self.published) @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: # override _matches to match indices in a pattern instead of just ALIAS # hit is the raw dict as returned by elasticsearch return fnmatch(hit["_index"], PATTERN) class Index: # we will use an alias instead of the index name = ALIAS # set settings and possibly other attributes of the index like # analyzers settings = {"number_of_shards": 1, "number_of_replicas": 0} async def setup() -> None: """ Create the index template in elasticsearch specifying the mappings and any settings to be used. This can be run at any time, ideally at every new code deploy. """ # create an index template index_template = BlogPost._index.as_composable_template( ALIAS, PATTERN, priority=PRIORITY ) # upload the template into elasticsearch # potentially overriding the one already there await index_template.save() # create the first index if it doesn't exist if not await BlogPost._index.exists(): await migrate(move_data=False) async def migrate(move_data: bool = True, update_alias: bool = True) -> None: """ Upgrade function that creates a new index for the data. Optionally it also can (and by default will) reindex previous copy of the data into the new index (specify ``move_data=False`` to skip this step) and update the alias to point to the latest index (set ``update_alias=False`` to skip). Note that while this function is running the application can still perform any and all searches without any loss of functionality. It should, however, not perform any writes at this time as those might be lost. 
""" # construct a new index name by appending current timestamp next_index = PATTERN.replace("*", datetime.now().strftime("%Y%m%d%H%M%S%f")) # get the low level connection es = async_connections.get_connection() # create new index, it will use the settings from the template await es.indices.create(index=next_index) if move_data: # move data from current alias to the new index await es.options(request_timeout=3600).reindex( body={"source": {"index": ALIAS}, "dest": {"index": next_index}} ) # refresh the index to make the changes visible await es.indices.refresh(index=next_index) if update_alias: # repoint the alias to point to the newly created index await es.indices.update_aliases( body={ "actions": [ {"remove": {"alias": ALIAS, "index": PATTERN}}, {"add": {"alias": ALIAS, "index": next_index}}, ] } ) async def main() -> None: # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create the empty index await setup() # create a new document bp = BlogPost( _id=0, title="Hello World!", tags=["testing", "dummy"], content=open(__file__).read(), ) await bp.save(refresh=True) # create new index await migrate() # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/completion.py000066400000000000000000000072641506101734100241220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example ``Document`` with completion suggester. In the ``Person`` class we index the person's name to allow auto completing in any order ("first last", "middle last first", ...). For the weight we use a value from the ``popularity`` field which is a long. To make the suggestions work in different languages we added a custom analyzer that does ascii folding. 
""" import asyncio import os from itertools import permutations from typing import TYPE_CHECKING, Any, Dict, Optional from elasticsearch.dsl import ( AsyncDocument, Completion, Keyword, Long, Text, analyzer, async_connections, mapped_field, token_filter, ) # custom analyzer for names ascii_fold = analyzer( "ascii_fold", # we don't want to split O'Brian or Toulouse-Lautrec tokenizer="whitespace", filter=["lowercase", token_filter("ascii_fold", "asciifolding")], ) class Person(AsyncDocument): if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: Optional[int] = mapped_field(default=None) name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") popularity: int = mapped_field(Long(), default=0) # completion field with a custom analyzer suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False) def clean(self) -> None: """ Automatically construct the suggestion input and weight by taking all possible permutations of Person's name as ``input`` and taking their popularity as ``weight``. """ self.suggest = { "input": [" ".join(p) for p in permutations(self.name.split())], "weight": self.popularity, } class Index: name = "test-suggest" settings = {"number_of_shards": 1, "number_of_replicas": 0} async def main() -> None: # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create the empty index await Person.init() # index some sample data for id, (name, popularity) in enumerate( [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)] ): await Person(_id=id, name=name, popularity=popularity).save() # refresh index manually to make changes live await Person._index.refresh() # run some suggestions for text in ("já", "Jara Cimr", "tou", "de hen"): s = Person.search() s = s.suggest("auto_complete", text, completion={"field": "suggest"}) response = await s.execute() # print out all the options we got for option in response.suggest["auto_complete"][0].options: print("%10s: %25s (%d)" % (text, option._source.name, option._score)) # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/composite_agg.py000066400000000000000000000065311506101734100245650ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import asyncio import os from typing import Any, AsyncIterator, Dict, Mapping, Sequence, cast from elasticsearch.dsl import Agg, AsyncSearch, Response, aggs, async_connections from elasticsearch.dsl.types import CompositeAggregate from elasticsearch.helpers import async_bulk from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX async def scan_aggs( search: AsyncSearch, source_aggs: Sequence[Mapping[str, Agg]], inner_aggs: Dict[str, Agg] = {}, size: int = 10, ) -> AsyncIterator[CompositeAggregate]: """ Helper function used to iterate over all possible bucket combinations of ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the ``composite`` aggregation under the hood to perform this. """ async def run_search(**kwargs: Any) -> Response: s = search[:0] bucket = s.aggs.bucket( "comp", aggs.Composite( sources=source_aggs, size=size, **kwargs, ), ) for agg_name, agg in inner_aggs.items(): bucket[agg_name] = agg return await s.execute() response = await run_search() while response.aggregations["comp"].buckets: for b in response.aggregations["comp"].buckets: yield cast(CompositeAggregate, b) if "after_key" in response.aggregations["comp"]: after = response.aggregations["comp"].after_key else: after = response.aggregations["comp"].buckets[-1].key response = await run_search(after=after) async def main() -> None: # initiate the default connection to elasticsearch client = async_connections.create_connection( hosts=[os.environ["ELASTICSEARCH_URL"]] ) # create the index and populate it with some data # note that the dataset is imported from the library's test suite await client.indices.delete(index="git", ignore_unavailable=True) await client.indices.create(index="git", **GIT_INDEX) await async_bulk(client, DATA, raise_on_error=True, refresh=True) # run some aggregations on the data async for b in scan_aggs( AsyncSearch(index="git"), [{"files": aggs.Terms(field="files")}], {"first_seen": aggs.Min(field="committed_date")}, ): print( "File %s has been modified %d times, first seen at %s." % (b.key.files, b.doc_count, b.first_seen.value_as_string) ) # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/esql_employees.py000066400000000000000000000116571506101734100250000ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ # ES|QL query builder example Requirements: $ pip install "elasticsearch[async]" faker To run the example: $ python esql_employees.py "name to search" The index will be created automatically with a list of 1000 randomly generated employees if it does not exist. Add `--recreate-index` or `-r` to the command to regenerate it. 
Examples: $ python esql_employees "Mark" # employees named Mark (first or last names) $ python esql_employees "Sarah" --limit 10 # up to 10 employees named Sarah $ python esql_employees "Sam" --sort height # sort results by height $ python esql_employees "Sam" --sort name # sort results by last name """ import argparse import asyncio import os import random from faker import Faker from elasticsearch.dsl import AsyncDocument, InnerDoc, M, async_connections from elasticsearch.esql import ESQLBase from elasticsearch.esql.functions import concat, multi_match fake = Faker() class Address(InnerDoc): address: M[str] city: M[str] zip_code: M[str] class Employee(AsyncDocument): emp_no: M[int] first_name: M[str] last_name: M[str] height: M[float] still_hired: M[bool] address: M[Address] class Index: name = "employees" @property def name(self) -> str: return f"{self.first_name} {self.last_name}" def __repr__(self) -> str: return f"" async def create(num_employees: int = 1000) -> None: print("Creating a new employee index...") if await Employee._index.exists(): await Employee._index.delete() await Employee.init() for i in range(num_employees): address = Address( address=fake.address(), city=fake.city(), zip_code=fake.zipcode() ) emp = Employee( emp_no=10000 + i, first_name=fake.first_name(), last_name=fake.last_name(), height=int((random.random() * 0.8 + 1.5) * 1000) / 1000, still_hired=random.random() >= 0.5, address=address, ) await emp.save() await Employee._index.refresh() async def search(query: str, limit: int, sort: str) -> None: q: ESQLBase = ( Employee.esql_from() .where(multi_match(query, Employee.first_name, Employee.last_name)) .eval(full_name=concat(Employee.first_name, " ", Employee.last_name)) ) if sort == "height": q = q.sort(Employee.height.desc()) elif sort == "name": q = q.sort(Employee.last_name.asc()) q = q.limit(limit) async for result in Employee.esql_execute(q, return_additional=True): assert type(result) == tuple employee = result[0] full_name = result[1]["full_name"] print( f"{full_name:<20}", f"{'Hired' if employee.still_hired else 'Not hired':<10}", f"{employee.height:5.2f}m", f"{employee.address.city:<20}", ) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Employee ES|QL example") parser.add_argument( "--recreate-index", "-r", action="store_true", help="Recreate and populate the index", ) parser.add_argument( "--limit", action="store", type=int, default=100, help="Maximum number or employees to return (default: 100)", ) parser.add_argument( "--sort", action="store", type=str, default=None, help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)', ) parser.add_argument( "query", action="store", help="The name or partial name to search for" ) return parser.parse_args() async def main() -> None: args = parse_args() # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not await Employee._index.exists(): await create() await Employee.init() await search(args.query, args.limit, args.sort) # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/parent_child.py000066400000000000000000000206601506101734100244000ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Complex data model example modeling stackoverflow-like data. It is used to showcase several key features of elasticsearch-dsl: * Object and Nested fields: see User and Comment classes and fields they are used in * method add_comment is used to add comments * Parent/Child relationship * See the Join field on Post creating the relationship between Question and Answer * Meta.matches allows the hits from same index to be wrapped in proper classes * to see how child objects are created see Question.add_answer * Question.search_answers shows how to query for children of a particular parent """ import asyncio import os from datetime import datetime from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast from elasticsearch.dsl import ( AsyncDocument, AsyncSearch, Date, InnerDoc, Join, Keyword, Long, Text, async_connections, mapped_field, ) class User(InnerDoc): """ Class used to represent a denormalized user stored on other objects. """ id: int = mapped_field(Long()) signed_up: Optional[datetime] = mapped_field(Date()) username: str = mapped_field(Text(fields={"keyword": Keyword()})) email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) class Comment(InnerDoc): """ Class wrapper for nested comment objects. """ author: User created: datetime content: str class Post(AsyncDocument): """ Base class for Question and Answer containing the common fields. 
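
    Post itself is abstract: its ``_matches()`` returns ``False``, so search
    hits are never deserialized into this class directly, only into the
    ``Question`` and ``Answer`` subclasses defined below.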
""" author: User if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _routing: str = mapped_field(default=None) _id: Optional[int] = mapped_field(default=None) created: Optional[datetime] = mapped_field(default=None) body: str = mapped_field(default="") comments: List[Comment] = mapped_field(default_factory=list) question_answer: Any = mapped_field( Join(relations={"question": "answer"}), default_factory=dict ) @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: # Post is an abstract class, make sure it never gets used for # deserialization return False class Index: name = "test-qa-site" settings = { "number_of_shards": 1, "number_of_replicas": 0, } async def add_comment( self, user: User, content: str, created: Optional[datetime] = None, commit: Optional[bool] = True, ) -> Comment: c = Comment(author=user, content=content, created=created or datetime.now()) self.comments.append(c) if commit: await self.save() return c async def save(self, **kwargs: Any) -> None: # type: ignore[override] # if there is no date, use now if self.created is None: self.created = datetime.now() await super().save(**kwargs) class Question(Post): tags: List[str] = mapped_field( default_factory=list ) # .tags will return empty list if not present title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: """Use Question class for parent documents""" return bool(hit["_source"]["question_answer"] == "question") @classmethod def search(cls, **kwargs: Any) -> AsyncSearch: # type: ignore[override] return cls._index.search(**kwargs).filter("term", question_answer="question") async def add_answer( self, user: User, body: str, created: Optional[datetime] = None, accepted: bool = False, commit: Optional[bool] = True, ) -> "Answer": answer = Answer( # required make sure the answer is stored in the same shard _routing=self.meta.id, # set up the parent/child mapping question_answer={"name": "answer", "parent": self.meta.id}, # pass in the field values author=user, created=created, body=body, is_accepted=accepted, ) if commit: await answer.save() return answer def search_answers(self) -> AsyncSearch: # search only our index s = Answer.search() # filter for answers belonging to us s = s.filter("parent_id", type="answer", id=self.meta.id) # add routing to only go to specific shard s = s.params(routing=self.meta.id) return s async def get_answers(self) -> List[Any]: """ Get answers either from inner_hits already present or by searching elasticsearch. 
""" if "inner_hits" in self.meta and "answer" in self.meta.inner_hits: return cast(List[Any], self.meta.inner_hits["answer"].hits) return [a async for a in self.search_answers()] async def save(self, **kwargs: Any) -> None: # type: ignore[override] self.question_answer = "question" await super().save(**kwargs) class Answer(Post): is_accepted: bool = mapped_field(default=False) @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: """Use Answer class for child documents with child name 'answer'""" return ( isinstance(hit["_source"]["question_answer"], dict) and hit["_source"]["question_answer"].get("name") == "answer" ) @classmethod def search(cls, **kwargs: Any) -> AsyncSearch: # type: ignore[override] return cls._index.search(**kwargs).exclude("term", question_answer="question") async def get_question(self) -> Optional[Question]: # cache question in self.meta # any attributes set on self would be interpreted as fields if "question" not in self.meta: self.meta.question = await Question.get( id=self.question_answer.parent, index=self.meta.index ) return cast(Optional[Question], self.meta.question) async def save(self, **kwargs: Any) -> None: # type: ignore[override] # set routing to parents id automatically self.meta.routing = self.question_answer.parent await super().save(**kwargs) async def setup() -> None: """Create an IndexTemplate and save it into elasticsearch.""" index_template = Post._index.as_composable_template("base", priority=100) await index_template.save() async def main() -> Answer: # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create index await setup() # user objects to use nick = User( id=47, signed_up=datetime(2017, 4, 3), username="fxdgear", email="nick.lang@elastic.co", location="Colorado", ) honza = User( id=42, signed_up=datetime(2013, 4, 3), username="honzakral", email="honza@elastic.co", location="Prague", ) # create a question object question = Question( _id=1, author=nick, tags=["elasticsearch", "python"], title="How do I use elasticsearch from Python?", body=""" I want to use elasticsearch, how do I do it from Python? """, ) await question.save() answer = await question.add_answer(honza, "Just use `elasticsearch-py`!") # close the connection await async_connections.get_connection().close() return answer if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/percolate.py000066400000000000000000000067501506101734100237260ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import asyncio import os from typing import TYPE_CHECKING, Any, List, Optional from elasticsearch.dsl import ( AsyncDocument, AsyncSearch, Keyword, Percolator, Q, Query, async_connections, mapped_field, ) class BlogPost(AsyncDocument): """ Blog posts that will be automatically tagged based on percolation queries. """ if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: int content: Optional[str] tags: List[str] = mapped_field(Keyword(), default_factory=list) class Index: name = "test-blogpost" async def add_tags(self) -> None: # run a percolation to automatically tag the blog post. s = AsyncSearch(index="test-percolator") s = s.query( "percolate", field="query", index=self._get_index(), document=self.to_dict() ) # collect all the tags from matched percolators async for percolator in s: self.tags.extend(percolator.tags) # make sure tags are unique self.tags = list(set(self.tags)) async def save(self, **kwargs: Any) -> None: # type: ignore[override] await self.add_tags() await super().save(**kwargs) class PercolatorDoc(AsyncDocument): """ Document class used for storing the percolation queries. """ if TYPE_CHECKING: _id: str # relevant fields from BlogPost must be also present here for the queries # to be able to use them. Another option would be to use document # inheritance but save() would have to be reset to normal behavior. content: Optional[str] # the percolator query to be run against the doc query: Query = mapped_field(Percolator()) # list of tags to append to a document tags: List[str] = mapped_field(Keyword(multi=True)) class Index: name = "test-percolator" settings = {"number_of_shards": 1, "number_of_replicas": 0} async def setup() -> None: # create the percolator index if it doesn't exist if not await PercolatorDoc._index.exists(): await PercolatorDoc.init() # register a percolation query looking for documents about python await PercolatorDoc( _id="python", tags=["programming", "development", "python"], content="", query=Q("match", content="python"), ).save(refresh=True) async def main() -> None: # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) await setup() # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/search_as_you_type.py000066400000000000000000000057351506101734100256370ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example ``Document`` with search_as_you_type field datatype and how to search it. 
When creating a field with the search_as_you_type datatype, Elasticsearch
creates additional subfields to enable efficient as-you-type completion,
matching terms at any position within the input.

A custom analyzer with ascii folding allows the search to work in different
languages.
"""

import asyncio
import os
from typing import TYPE_CHECKING, Optional

from elasticsearch.dsl import (
    AsyncDocument,
    SearchAsYouType,
    async_connections,
    mapped_field,
)
from elasticsearch.dsl.query import MultiMatch


class Person(AsyncDocument):
    if TYPE_CHECKING:
        # definitions here help type checkers understand additional arguments
        # that are allowed in the constructor
        _id: Optional[int] = mapped_field(default=None)

    name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="")

    class Index:
        name = "test-search-as-you-type"
        settings = {"number_of_shards": 1, "number_of_replicas": 0}


async def main() -> None:
    # initiate the default connection to elasticsearch
    async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])

    # create the empty index
    await Person.init()

    import pprint

    pprint.pprint(Person().to_dict(), indent=2)

    # index some sample data
    names = [
        "Andy Warhol",
        "Alphonse Mucha",
        "Henri de Toulouse-Lautrec",
        "Jára Cimrman",
    ]
    for id, name in enumerate(names):
        await Person(_id=id, name=name).save()

    # refresh index manually to make changes live
    await Person._index.refresh()

    # run some suggestions
    for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"):
        s = Person.search()
        s.query = MultiMatch(  # type: ignore[assignment]
            query=text,
            type="bool_prefix",
            fields=["name", "name._2gram", "name._3gram"],
        )
        response = await s.execute()

        # print out all the options we got
        for h in response:
            print("%15s: %25s" % (text, h.name))

    # close the connection
    await async_connections.get_connection().close()


if __name__ == "__main__":
    asyncio.run(main())
python-elasticsearch-9.1.1/examples/dsl/async/semantic_text.py000066400000000000000000000110021506101734100246010ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

"""
# Semantic Text example

Requirements:

$ pip install "elasticsearch[async]" tqdm

Before running this example, an ELSER inference endpoint must be created in the
Elasticsearch cluster. This can be done manually from Kibana, or with the
following curl command from a terminal:

curl -X PUT \
  "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \
  -H "Content-Type: application/json" \
  -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}'

To run the example:

$ python semantic_text.py "text to search"

The index will be created automatically if it does not exist. Add
`--recreate-index` to the command to regenerate it.

The example dataset includes a selection of workplace documents.
The following are good example queries to try out with this dataset: $ python semantic_text.py "work from home" $ python semantic_text.py "vacation time" $ python semantic_text.py "can I bring a bird to work?" When the index is created, the inference service will split the documents into short passages, and for each passage a sparse embedding will be generated using Elastic's ELSER v2 model. """ import argparse import asyncio import json import os from datetime import datetime from typing import Any, Optional from urllib.request import urlopen from tqdm import tqdm from elasticsearch import dsl DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" class WorkplaceDoc(dsl.AsyncDocument): class Index: name = "workplace_documents_semantic" name: str summary: str content: Any = dsl.mapped_field( dsl.field.SemanticText(inference_id="my-elser-endpoint") ) created: datetime updated: Optional[datetime] url: str = dsl.mapped_field(dsl.Keyword()) category: str = dsl.mapped_field(dsl.Keyword()) async def create() -> None: # create the index await WorkplaceDoc._index.delete(ignore_unavailable=True) await WorkplaceDoc.init() # download the data dataset = json.loads(urlopen(DATASET_URL).read()) # import the dataset for data in tqdm(dataset, desc="Indexing documents..."): doc = WorkplaceDoc( name=data["name"], summary=data["summary"], content=data["content"], created=data.get("created_on"), updated=data.get("updated_at"), url=data["url"], category=data["category"], ) await doc.save() # refresh the index await WorkplaceDoc._index.refresh() async def search(query: str) -> dsl.AsyncSearch[WorkplaceDoc]: search = WorkplaceDoc.search() search = search[:5] return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query)) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") parser.add_argument( "--recreate-index", action="store_true", help="Recreate and populate the index" ) parser.add_argument("query", action="store", help="The search query") return parser.parse_args() async def main() -> None: args = parse_args() # initiate the default connection to elasticsearch dsl.async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not await WorkplaceDoc._index.exists(): await create() results = await search(args.query) async for hit in results: print( f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" ) print(f"Content: {hit.content.text}") print("--------------------\n") # close the connection await dsl.async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/sparse_vectors.py000066400000000000000000000143151506101734100250060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. """ # Sparse vector database example Requirements: $ pip install nltk tqdm "elasticsearch[async]" Before running this example, the ELSER v2 model must be downloaded and deployed to the Elasticsearch cluster, and an ingest pipeline must be defined. This can be done manually from Kibana, or with the following three curl commands from a terminal, adjusting the endpoint as needed: curl -X PUT \ "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \ -H "Content-Type: application/json" \ -d '{"input":{"field_names":["text_field"]}}' curl -X POST \ "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated" curl -X PUT \ "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \ -H "Content-Type: application/json" \ -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}' To run the example: $ python sparse_vectors.py "text to search" The index will be created automatically if it does not exist. Add `--recreate-index` to regenerate it. The example dataset includes a selection of workplace documents. The following are good example queries to try out with this dataset: $ python sparse_vectors.py "work from home" $ python sparse_vectors.py "vacation time" $ python sparse_vectors.py "can I bring a bird to work?" When the index is created, the documents are split into short passages, and for each passage a sparse embedding is generated using Elastic's ELSER v2 model. The documents that are returned as search results are those that have the highest scored passages. Add `--show-inner-hits` to the command to see individual passage results as well. 
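If indexing fails with inference errors, one way to check whether the ELSER deployment finished is to query the trained model stats API (model id as above, endpoint adjusted as needed): curl "http://localhost:9200/_ml/trained_models/.elser_model_2/_stats"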
""" import argparse import asyncio import json import os from datetime import datetime from typing import Any, Dict, List, Optional from urllib.request import urlopen import nltk from tqdm import tqdm from elasticsearch.dsl import ( AsyncDocument, AsyncSearch, InnerDoc, Keyword, Q, SparseVector, async_connections, mapped_field, ) DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" # initialize sentence tokenizer nltk.download("punkt_tab", quiet=True) class Passage(InnerDoc): content: Optional[str] embedding: Dict[str, float] = mapped_field(SparseVector(), init=False) class WorkplaceDoc(AsyncDocument): class Index: name = "workplace_documents_sparse" settings = {"default_pipeline": "elser_ingest_pipeline"} name: str summary: str content: str created: datetime updated: Optional[datetime] url: str = mapped_field(Keyword()) category: str = mapped_field(Keyword()) passages: List[Passage] = mapped_field(default=[]) _model: Any = None def clean(self) -> None: # split the content into sentences passages = nltk.sent_tokenize(self.content) # generate an embedding for each passage and save it as a nested document for passage in passages: self.passages.append(Passage(content=passage)) async def create() -> None: # create the index await WorkplaceDoc._index.delete(ignore_unavailable=True) await WorkplaceDoc.init() # download the data dataset = json.loads(urlopen(DATASET_URL).read()) # import the dataset for data in tqdm(dataset, desc="Indexing documents..."): doc = WorkplaceDoc( name=data["name"], summary=data["summary"], content=data["content"], created=data.get("created_on"), updated=data.get("updated_at"), url=data["url"], category=data["category"], ) await doc.save() async def search(query: str) -> AsyncSearch[WorkplaceDoc]: return WorkplaceDoc.search()[:5].query( "nested", path="passages", query=Q( "text_expansion", passages__content={ "model_id": ".elser_model_2", "model_text": query, }, ), inner_hits={"size": 2}, ) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") parser.add_argument( "--recreate-index", action="store_true", help="Recreate and populate the index" ) parser.add_argument( "--show-inner-hits", action="store_true", help="Show results for individual passages", ) parser.add_argument("query", action="store", help="The search query") return parser.parse_args() async def main() -> None: args = parse_args() # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not await WorkplaceDoc._index.exists(): await create() results = await search(args.query) async for hit in results: print( f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" ) print(f"Summary: {hit.summary}") if args.show_inner_hits: for passage in hit.meta.inner_hits["passages"]: print(f" - [Score: {passage.meta.score}] {passage.content!r}") print("") # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/async/vectors.py000066400000000000000000000131651506101734100234330ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ # Vector database example Requirements: $ pip install nltk sentence_transformers tqdm "elasticsearch[async]" To run the example: $ python vectors.py "text to search" The index will be created automatically if it does not exist. Add `--recreate-index` to regenerate it. The example dataset includes a selection of workplace documents. The following are good example queries to try out with this dataset: $ python vectors.py "work from home" $ python vectors.py "vacation time" $ python vectors.py "can I bring a bird to work?" When the index is created, the documents are split into short passages, and for each passage an embedding is generated using the open source "all-MiniLM-L6-v2" model. The documents that are returned as search results are those that have the highest scored passages. Add `--show-inner-hits` to the command to see individual passage results as well. """ import argparse import asyncio import json import os from datetime import datetime from typing import Any, List, Optional, cast from urllib.request import urlopen import nltk from sentence_transformers import SentenceTransformer from tqdm import tqdm from elasticsearch.dsl import ( AsyncDocument, AsyncSearch, DenseVector, InnerDoc, Keyword, M, async_connections, mapped_field, ) DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" MODEL_NAME = "all-MiniLM-L6-v2" # initialize sentence tokenizer nltk.download("punkt_tab", quiet=True) # this will be the embedding model embedding_model: Any = None class Passage(InnerDoc): content: str embedding: List[float] = mapped_field(DenseVector()) class WorkplaceDoc(AsyncDocument): class Index: name = "workplace_documents" name: str summary: str content: str created: datetime updated: Optional[datetime] url: str = mapped_field(Keyword(required=True)) category: str = mapped_field(Keyword(required=True)) passages: M[List[Passage]] = mapped_field(default=[]) @classmethod def get_embedding(cls, input: str) -> List[float]: global embedding_model if embedding_model is None: embedding_model = SentenceTransformer(MODEL_NAME) return cast(List[float], list(embedding_model.encode(input))) def clean(self) -> None: # split the content into sentences passages = cast(List[str], nltk.sent_tokenize(self.content)) # generate an embedding for each passage and save it as a nested document for passage in passages: self.passages.append( Passage(content=passage, embedding=self.get_embedding(passage)) ) async def create() -> None: # create the index await WorkplaceDoc._index.delete(ignore_unavailable=True) await WorkplaceDoc.init() # download the data dataset = json.loads(urlopen(DATASET_URL).read()) # import the dataset for data in tqdm(dataset, desc="Indexing documents..."): doc = WorkplaceDoc( name=data["name"], summary=data["summary"], content=data["content"], created=data.get("created_on"), updated=data.get("updated_at"), url=data["url"], category=data["category"], ) await doc.save() async def 
search(query: str) -> AsyncSearch[WorkplaceDoc]: return WorkplaceDoc.search().knn( field=WorkplaceDoc.passages.embedding, k=5, num_candidates=50, query_vector=list(WorkplaceDoc.get_embedding(query)), inner_hits={"size": 2}, ) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") parser.add_argument( "--recreate-index", action="store_true", help="Recreate and populate the index" ) parser.add_argument( "--show-inner-hits", action="store_true", help="Show results for individual passages", ) parser.add_argument("query", action="store", help="The search query") return parser.parse_args() async def main() -> None: args = parse_args() # initiate the default connection to elasticsearch async_connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not await WorkplaceDoc._index.exists(): await create() results = await search(args.query) async for hit in results: print( f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" ) print(f"Summary: {hit.summary}") if args.show_inner_hits: for passage in hit.meta.inner_hits["passages"]: print(f" - [Score: {passage.meta.score}] {passage.content!r}") print("") # close the connection await async_connections.get_connection().close() if __name__ == "__main__": asyncio.run(main()) python-elasticsearch-9.1.1/examples/dsl/completion.py000066400000000000000000000071301506101734100227750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example ``Document`` with completion suggester. In the ``Person`` class we index the person's name to allow auto completing in any order ("first last", "middle last first", ...). For the weight we use a value from the ``popularity`` field which is a long. To make the suggestions work in different languages we added a custom analyzer that does ascii folding. 
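For example, the ``Jára Cimrman`` sample document with popularity 124 is indexed with the suggestion inputs ``["Jára Cimrman", "Cimrman Jára"]``, both carrying weight 124, so the name can be completed starting from either word.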
""" import os from itertools import permutations from typing import TYPE_CHECKING, Any, Dict, Optional from elasticsearch.dsl import ( Completion, Document, Keyword, Long, Text, analyzer, connections, mapped_field, token_filter, ) # custom analyzer for names ascii_fold = analyzer( "ascii_fold", # we don't want to split O'Brian or Toulouse-Lautrec tokenizer="whitespace", filter=["lowercase", token_filter("ascii_fold", "asciifolding")], ) class Person(Document): if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: Optional[int] = mapped_field(default=None) name: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") popularity: int = mapped_field(Long(), default=0) # completion field with a custom analyzer suggest: Dict[str, Any] = mapped_field(Completion(analyzer=ascii_fold), init=False) def clean(self) -> None: """ Automatically construct the suggestion input and weight by taking all possible permutations of Person's name as ``input`` and taking their popularity as ``weight``. """ self.suggest = { "input": [" ".join(p) for p in permutations(self.name.split())], "weight": self.popularity, } class Index: name = "test-suggest" settings = {"number_of_shards": 1, "number_of_replicas": 0} def main() -> None: # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create the empty index Person.init() # index some sample data for id, (name, popularity) in enumerate( [("Henri de Toulouse-Lautrec", 42), ("Jára Cimrman", 124)] ): Person(_id=id, name=name, popularity=popularity).save() # refresh index manually to make changes live Person._index.refresh() # run some suggestions for text in ("já", "Jara Cimr", "tou", "de hen"): s = Person.search() s = s.suggest("auto_complete", text, completion={"field": "suggest"}) response = s.execute() # print out all the options we got for option in response.suggest["auto_complete"][0].options: print("%10s: %25s (%d)" % (text, option._source.name, option._score)) # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/composite_agg.py000066400000000000000000000062661506101734100234550ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import os from typing import Any, Dict, Iterator, Mapping, Sequence, cast from elasticsearch.dsl import Agg, Response, Search, aggs, connections from elasticsearch.dsl.types import CompositeAggregate from elasticsearch.helpers import bulk from test_elasticsearch.test_dsl.test_integration.test_data import DATA, GIT_INDEX def scan_aggs( search: Search, source_aggs: Sequence[Mapping[str, Agg]], inner_aggs: Dict[str, Agg] = {}, size: int = 10, ) -> Iterator[CompositeAggregate]: """ Helper function used to iterate over all possible bucket combinations of ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the ``composite`` aggregation under the hood to perform this. """ def run_search(**kwargs: Any) -> Response: s = search[:0] bucket = s.aggs.bucket( "comp", aggs.Composite( sources=source_aggs, size=size, **kwargs, ), ) for agg_name, agg in inner_aggs.items(): bucket[agg_name] = agg return s.execute() response = run_search() while response.aggregations["comp"].buckets: for b in response.aggregations["comp"].buckets: yield cast(CompositeAggregate, b) if "after_key" in response.aggregations["comp"]: after = response.aggregations["comp"].after_key else: after = response.aggregations["comp"].buckets[-1].key response = run_search(after=after) def main() -> None: # initiate the default connection to elasticsearch client = connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create the index and populate it with some data # note that the dataset is imported from the library's test suite client.indices.delete(index="git", ignore_unavailable=True) client.indices.create(index="git", **GIT_INDEX) bulk(client, DATA, raise_on_error=True, refresh=True) # run some aggregations on the data for b in scan_aggs( Search(index="git"), [{"files": aggs.Terms(field="files")}], {"first_seen": aggs.Min(field="committed_date")}, ): print( "File %s has been modified %d times, first seen at %s." % (b.key.files, b.doc_count, b.first_seen.value_as_string) ) # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/esql_employees.py000066400000000000000000000114321506101734100236520ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ # ES|QL query builder example Requirements: $ pip install elasticsearch faker To run the example: $ python esql_employees.py "name to search" The index will be created automatically with a list of 1000 randomly generated employees if it does not exist. Add `--recreate-index` or `-r` to the command to regenerate it. 
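For a search like "Sam" sorted by height, the query builder used below assembles an ES|QL pipeline roughly equivalent to (a sketch; the exact text is generated by the builder): FROM employees | WHERE MULTI_MATCH("Sam", first_name, last_name) | EVAL full_name = CONCAT(first_name, " ", last_name) | SORT height DESC | LIMIT 100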
Examples: $ python esql_employees.py "Mark" # employees named Mark (first or last names) $ python esql_employees.py "Sarah" --limit 10 # up to 10 employees named Sarah $ python esql_employees.py "Sam" --sort height # sort results by height $ python esql_employees.py "Sam" --sort name # sort results by last name """ import argparse import os import random from faker import Faker from elasticsearch.dsl import Document, InnerDoc, M, connections from elasticsearch.esql import ESQLBase from elasticsearch.esql.functions import concat, multi_match fake = Faker() class Address(InnerDoc): address: M[str] city: M[str] zip_code: M[str] class Employee(Document): emp_no: M[int] first_name: M[str] last_name: M[str] height: M[float] still_hired: M[bool] address: M[Address] class Index: name = "employees" @property def name(self) -> str: return f"{self.first_name} {self.last_name}" def __repr__(self) -> str: return f"<Employee {self.name}>" def create(num_employees: int = 1000) -> None: print("Creating a new employee index...") if Employee._index.exists(): Employee._index.delete() Employee.init() for i in range(num_employees): address = Address( address=fake.address(), city=fake.city(), zip_code=fake.zipcode() ) emp = Employee( emp_no=10000 + i, first_name=fake.first_name(), last_name=fake.last_name(), height=int((random.random() * 0.8 + 1.5) * 1000) / 1000, still_hired=random.random() >= 0.5, address=address, ) emp.save() Employee._index.refresh() def search(query: str, limit: int, sort: str) -> None: q: ESQLBase = ( Employee.esql_from() .where(multi_match(query, Employee.first_name, Employee.last_name)) .eval(full_name=concat(Employee.first_name, " ", Employee.last_name)) ) if sort == "height": q = q.sort(Employee.height.desc()) elif sort == "name": q = q.sort(Employee.last_name.asc()) q = q.limit(limit) for result in Employee.esql_execute(q, return_additional=True): assert type(result) == tuple employee = result[0] full_name = result[1]["full_name"] print( f"{full_name:<20}", f"{'Hired' if employee.still_hired else 'Not hired':<10}", f"{employee.height:5.2f}m", f"{employee.address.city:<20}", ) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Employee ES|QL example") parser.add_argument( "--recreate-index", "-r", action="store_true", help="Recreate and populate the index", ) parser.add_argument( "--limit", action="store", type=int, default=100, help="Maximum number of employees to return (default: 100)", ) parser.add_argument( "--sort", action="store", type=str, default=None, help='Sort by "name" (ascending) or by "height" (descending) (default: no sorting)', ) parser.add_argument( "query", action="store", help="The name or partial name to search for" ) return parser.parse_args() def main() -> None: args = parse_args() # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not Employee._index.exists(): create() Employee.init() search(args.query, args.limit, args.sort) # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/parent_child.py000066400000000000000000000203461506101734100232640ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V.
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Complex data model example modeling stackoverflow-like data. It is used to showcase several key features of elasticsearch-dsl: * Object and Nested fields: see User and Comment classes and fields they are used in * method add_comment is used to add comments * Parent/Child relationship * See the Join field on Post creating the relationship between Question and Answer * Meta.matches allows the hits from same index to be wrapped in proper classes * to see how child objects are created see Question.add_answer * Question.search_answers shows how to query for children of a particular parent """ import os from datetime import datetime from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast from elasticsearch.dsl import ( Date, Document, InnerDoc, Join, Keyword, Long, Search, Text, connections, mapped_field, ) class User(InnerDoc): """ Class used to represent a denormalized user stored on other objects. """ id: int = mapped_field(Long()) signed_up: Optional[datetime] = mapped_field(Date()) username: str = mapped_field(Text(fields={"keyword": Keyword()})) email: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) location: Optional[str] = mapped_field(Text(fields={"keyword": Keyword()})) class Comment(InnerDoc): """ Class wrapper for nested comment objects. """ author: User created: datetime content: str class Post(Document): """ Base class for Question and Answer containing the common fields. 
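The ``question_answer`` join field ties the two document types together: a question stores the plain value ``"question"``, while an answer stores ``{"name": "answer", "parent": <question id>}`` and is routed to its parent's shard.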
""" author: User if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _routing: str = mapped_field(default=None) _id: Optional[int] = mapped_field(default=None) created: Optional[datetime] = mapped_field(default=None) body: str = mapped_field(default="") comments: List[Comment] = mapped_field(default_factory=list) question_answer: Any = mapped_field( Join(relations={"question": "answer"}), default_factory=dict ) @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: # Post is an abstract class, make sure it never gets used for # deserialization return False class Index: name = "test-qa-site" settings = { "number_of_shards": 1, "number_of_replicas": 0, } def add_comment( self, user: User, content: str, created: Optional[datetime] = None, commit: Optional[bool] = True, ) -> Comment: c = Comment(author=user, content=content, created=created or datetime.now()) self.comments.append(c) if commit: self.save() return c def save(self, **kwargs: Any) -> None: # type: ignore[override] # if there is no date, use now if self.created is None: self.created = datetime.now() super().save(**kwargs) class Question(Post): tags: List[str] = mapped_field( default_factory=list ) # .tags will return empty list if not present title: str = mapped_field(Text(fields={"keyword": Keyword()}), default="") @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: """Use Question class for parent documents""" return bool(hit["_source"]["question_answer"] == "question") @classmethod def search(cls, **kwargs: Any) -> Search: # type: ignore[override] return cls._index.search(**kwargs).filter("term", question_answer="question") def add_answer( self, user: User, body: str, created: Optional[datetime] = None, accepted: bool = False, commit: Optional[bool] = True, ) -> "Answer": answer = Answer( # required make sure the answer is stored in the same shard _routing=self.meta.id, # set up the parent/child mapping question_answer={"name": "answer", "parent": self.meta.id}, # pass in the field values author=user, created=created, body=body, is_accepted=accepted, ) if commit: answer.save() return answer def search_answers(self) -> Search: # search only our index s = Answer.search() # filter for answers belonging to us s = s.filter("parent_id", type="answer", id=self.meta.id) # add routing to only go to specific shard s = s.params(routing=self.meta.id) return s def get_answers(self) -> List[Any]: """ Get answers either from inner_hits already present or by searching elasticsearch. 
""" if "inner_hits" in self.meta and "answer" in self.meta.inner_hits: return cast(List[Any], self.meta.inner_hits["answer"].hits) return [a for a in self.search_answers()] def save(self, **kwargs: Any) -> None: # type: ignore[override] self.question_answer = "question" super().save(**kwargs) class Answer(Post): is_accepted: bool = mapped_field(default=False) @classmethod def _matches(cls, hit: Dict[str, Any]) -> bool: """Use Answer class for child documents with child name 'answer'""" return ( isinstance(hit["_source"]["question_answer"], dict) and hit["_source"]["question_answer"].get("name") == "answer" ) @classmethod def search(cls, **kwargs: Any) -> Search: # type: ignore[override] return cls._index.search(**kwargs).exclude("term", question_answer="question") def get_question(self) -> Optional[Question]: # cache question in self.meta # any attributes set on self would be interpreted as fields if "question" not in self.meta: self.meta.question = Question.get( id=self.question_answer.parent, index=self.meta.index ) return cast(Optional[Question], self.meta.question) def save(self, **kwargs: Any) -> None: # type: ignore[override] # set routing to parents id automatically self.meta.routing = self.question_answer.parent super().save(**kwargs) def setup() -> None: """Create an IndexTemplate and save it into elasticsearch.""" index_template = Post._index.as_composable_template("base", priority=100) index_template.save() def main() -> Answer: # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create index setup() # user objects to use nick = User( id=47, signed_up=datetime(2017, 4, 3), username="fxdgear", email="nick.lang@elastic.co", location="Colorado", ) honza = User( id=42, signed_up=datetime(2013, 4, 3), username="honzakral", email="honza@elastic.co", location="Prague", ) # create a question object question = Question( _id=1, author=nick, tags=["elasticsearch", "python"], title="How do I use elasticsearch from Python?", body=""" I want to use elasticsearch, how do I do it from Python? """, ) question.save() answer = question.add_answer(honza, "Just use `elasticsearch-py`!") # close the connection connections.get_connection().close() return answer if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/percolate.py000066400000000000000000000065311506101734100226060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os from typing import TYPE_CHECKING, Any, List, Optional from elasticsearch.dsl import ( Document, Keyword, Percolator, Q, Query, Search, connections, mapped_field, ) class BlogPost(Document): """ Blog posts that will be automatically tagged based on percolation queries. 
""" if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: int content: Optional[str] tags: List[str] = mapped_field(Keyword(), default_factory=list) class Index: name = "test-blogpost" def add_tags(self) -> None: # run a percolation to automatically tag the blog post. s = Search(index="test-percolator") s = s.query( "percolate", field="query", index=self._get_index(), document=self.to_dict() ) # collect all the tags from matched percolators for percolator in s: self.tags.extend(percolator.tags) # make sure tags are unique self.tags = list(set(self.tags)) def save(self, **kwargs: Any) -> None: # type: ignore[override] self.add_tags() super().save(**kwargs) class PercolatorDoc(Document): """ Document class used for storing the percolation queries. """ if TYPE_CHECKING: _id: str # relevant fields from BlogPost must be also present here for the queries # to be able to use them. Another option would be to use document # inheritance but save() would have to be reset to normal behavior. content: Optional[str] # the percolator query to be run against the doc query: Query = mapped_field(Percolator()) # list of tags to append to a document tags: List[str] = mapped_field(Keyword(multi=True)) class Index: name = "test-percolator" settings = {"number_of_shards": 1, "number_of_replicas": 0} def setup() -> None: # create the percolator index if it doesn't exist if not PercolatorDoc._index.exists(): PercolatorDoc.init() # register a percolation query looking for documents about python PercolatorDoc( _id="python", tags=["programming", "development", "python"], content="", query=Q("match", content="python"), ).save(refresh=True) def main() -> None: # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) setup() # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/search_as_you_type.py000066400000000000000000000056011506101734100245120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Example ``Document`` with search_as_you_type field datatype and how to search it. When creating a field with search_as_you_type datatype ElasticSearch creates additional subfields to enable efficient as-you-type completion, matching terms at any position within the input. To custom analyzer with ascii folding allow search to work in different languages. 
""" import os from typing import TYPE_CHECKING, Optional from elasticsearch.dsl import ( Document, SearchAsYouType, connections, mapped_field, ) from elasticsearch.dsl.query import MultiMatch class Person(Document): if TYPE_CHECKING: # definitions here help type checkers understand additional arguments # that are allowed in the constructor _id: Optional[int] = mapped_field(default=None) name: str = mapped_field(SearchAsYouType(max_shingle_size=3), default="") class Index: name = "test-search-as-you-type" settings = {"number_of_shards": 1, "number_of_replicas": 0} def main() -> None: # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) # create the empty index Person.init() import pprint pprint.pprint(Person().to_dict(), indent=2) # index some sample data names = [ "Andy Warhol", "Alphonse Mucha", "Henri de Toulouse-Lautrec", "Jára Cimrman", ] for id, name in enumerate(names): Person(_id=id, name=name).save() # refresh index manually to make changes live Person._index.refresh() # run some suggestions for text in ("já", "Cimr", "toulouse", "Henri Tou", "a"): s = Person.search() s.query = MultiMatch( # type: ignore[assignment] query=text, type="bool_prefix", fields=["name", "name._2gram", "name._3gram"], ) response = s.execute() # print out all the options we got for h in response: print("%15s: %25s" % (text, h.name)) # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/semantic_text.py000066400000000000000000000105771506101734100235040ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ # Semantic Text example Requirements: $ pip install elasticsearch tqdm Before running this example, an ELSER inference endpoint must be created in the Elasticsearch cluster. This can be done manually from Kibana, or with the following curl command from a terminal: curl -X PUT \ "$ELASTICSEARCH_URL/_inference/sparse_embedding/my-elser-endpoint" \ -H "Content-Type: application/json" \ -d '{"service":"elser","service_settings":{"num_allocations":1,"num_threads":1}}' To run the example: $ python semantic_text.py "text to search" The index will be created automatically if it does not exist. Add `--recreate-index` to the command to regenerate it. The example dataset includes a selection of workplace documents. The following are good example queries to try out with this dataset: $ python semantic_text.py "work from home" $ python semantic_text.py "vacation time" $ python semantic_text.py "can I bring a bird to work?" When the index is created, the inference service will split the documents into short passages, and for each passage a sparse embedding will be generated using Elastic's ELSER v2 model. 
""" import argparse import json import os from datetime import datetime from typing import Any, Optional from urllib.request import urlopen from tqdm import tqdm from elasticsearch import dsl DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" class WorkplaceDoc(dsl.Document): class Index: name = "workplace_documents_semantic" name: str summary: str content: Any = dsl.mapped_field( dsl.field.SemanticText(inference_id="my-elser-endpoint") ) created: datetime updated: Optional[datetime] url: str = dsl.mapped_field(dsl.Keyword()) category: str = dsl.mapped_field(dsl.Keyword()) def create() -> None: # create the index WorkplaceDoc._index.delete(ignore_unavailable=True) WorkplaceDoc.init() # download the data dataset = json.loads(urlopen(DATASET_URL).read()) # import the dataset for data in tqdm(dataset, desc="Indexing documents..."): doc = WorkplaceDoc( name=data["name"], summary=data["summary"], content=data["content"], created=data.get("created_on"), updated=data.get("updated_at"), url=data["url"], category=data["category"], ) doc.save() # refresh the index WorkplaceDoc._index.refresh() def search(query: str) -> dsl.Search[WorkplaceDoc]: search = WorkplaceDoc.search() search = search[:5] return search.query(dsl.query.Semantic(field=WorkplaceDoc.content, query=query)) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") parser.add_argument( "--recreate-index", action="store_true", help="Recreate and populate the index" ) parser.add_argument("query", action="store", help="The search query") return parser.parse_args() def main() -> None: args = parse_args() # initiate the default connection to elasticsearch dsl.connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not WorkplaceDoc._index.exists(): create() results = search(args.query) for hit in results: print( f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" ) print(f"Content: {hit.content.text}") print("--------------------\n") # close the connection dsl.connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/sparse_vectors.py000066400000000000000000000141001506101734100236610ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ # Sparse vector database example Requirements: $ pip install nltk tqdm elasticsearch Before running this example, the ELSER v2 model must be downloaded and deployed to the Elasticsearch cluster, and an ingest pipeline must be defined. 
This can be done manually from Kibana, or with the following three curl commands from a terminal, adjusting the endpoint as needed: curl -X PUT \ "http://localhost:9200/_ml/trained_models/.elser_model_2?wait_for_completion" \ -H "Content-Type: application/json" \ -d '{"input":{"field_names":["text_field"]}}' curl -X POST \ "http://localhost:9200/_ml/trained_models/.elser_model_2/deployment/_start?wait_for=fully_allocated" curl -X PUT \ "http://localhost:9200/_ingest/pipeline/elser_ingest_pipeline" \ -H "Content-Type: application/json" \ -d '{"processors":[{"foreach":{"field":"passages","processor":{"inference":{"model_id":".elser_model_2","input_output":[{"input_field":"_ingest._value.content","output_field":"_ingest._value.embedding"}]}}}}]}' To run the example: $ python sparse_vectors.py "text to search" The index will be created automatically if it does not exist. Add `--recreate-index` to regenerate it. The example dataset includes a selection of workplace documents. The following are good example queries to try out with this dataset: $ python sparse_vectors.py "work from home" $ python sparse_vectors.py "vacation time" $ python sparse_vectors.py "can I bring a bird to work?" When the index is created, the documents are split into short passages, and for each passage a sparse embedding is generated using Elastic's ELSER v2 model. The documents that are returned as search results are those that have the highest scored passages. Add `--show-inner-hits` to the command to see individual passage results as well. """ import argparse import json import os from datetime import datetime from typing import Any, Dict, List, Optional from urllib.request import urlopen import nltk from tqdm import tqdm from elasticsearch.dsl import ( Document, InnerDoc, Keyword, Q, Search, SparseVector, connections, mapped_field, ) DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" # initialize sentence tokenizer nltk.download("punkt_tab", quiet=True) class Passage(InnerDoc): content: Optional[str] embedding: Dict[str, float] = mapped_field(SparseVector(), init=False) class WorkplaceDoc(Document): class Index: name = "workplace_documents_sparse" settings = {"default_pipeline": "elser_ingest_pipeline"} name: str summary: str content: str created: datetime updated: Optional[datetime] url: str = mapped_field(Keyword()) category: str = mapped_field(Keyword()) passages: List[Passage] = mapped_field(default=[]) _model: Any = None def clean(self) -> None: # split the content into sentences passages = nltk.sent_tokenize(self.content) # generate an embedding for each passage and save it as a nested document for passage in passages: self.passages.append(Passage(content=passage)) def create() -> None: # create the index WorkplaceDoc._index.delete(ignore_unavailable=True) WorkplaceDoc.init() # download the data dataset = json.loads(urlopen(DATASET_URL).read()) # import the dataset for data in tqdm(dataset, desc="Indexing documents..."): doc = WorkplaceDoc( name=data["name"], summary=data["summary"], content=data["content"], created=data.get("created_on"), updated=data.get("updated_at"), url=data["url"], category=data["category"], ) doc.save() def search(query: str) -> Search[WorkplaceDoc]: return WorkplaceDoc.search()[:5].query( "nested", path="passages", query=Q( "text_expansion", passages__content={ "model_id": ".elser_model_2", "model_text": query, }, ), inner_hits={"size": 2}, ) def parse_args() -> argparse.Namespace: parser = 
argparse.ArgumentParser(description="Vector database with Elasticsearch") parser.add_argument( "--recreate-index", action="store_true", help="Recreate and populate the index" ) parser.add_argument( "--show-inner-hits", action="store_true", help="Show results for individual passages", ) parser.add_argument("query", action="store", help="The search query") return parser.parse_args() def main() -> None: args = parse_args() # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not WorkplaceDoc._index.exists(): create() results = search(args.query) for hit in results: print( f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" ) print(f"Summary: {hit.summary}") if args.show_inner_hits: for passage in hit.meta.inner_hits["passages"]: print(f" - [Score: {passage.meta.score}] {passage.content!r}") print("") # close the connection connections.get_connection().close() if __name__ == "__main__": main() python-elasticsearch-9.1.1/examples/dsl/vectors.py000066400000000000000000000127501506101734100223150ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ # Vector database example Requirements: $ pip install nltk sentence_transformers tqdm elasticsearch To run the example: $ python vectors.py "text to search" The index will be created automatically if it does not exist. Add `--recreate-index` to regenerate it. The example dataset includes a selection of workplace documents. The following are good example queries to try out with this dataset: $ python vectors.py "work from home" $ python vectors.py "vacation time" $ python vectors.py "can I bring a bird to work?" When the index is created, the documents are split into short passages, and for each passage an embedding is generated using the open source "all-MiniLM-L6-v2" model. The documents that are returned as search results are those that have the highest scored passages. Add `--show-inner-hits` to the command to see individual passage results as well. 
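Under the hood the query is an approximate kNN search on the nested ``passages.embedding`` field: the ``k=5`` closest passage vectors are selected from ``num_candidates=50`` candidates per shard, and ``inner_hits`` exposes the best matching passages of each returned document.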
""" import argparse import json import os from datetime import datetime from typing import Any, List, Optional, cast from urllib.request import urlopen import nltk from sentence_transformers import SentenceTransformer from tqdm import tqdm from elasticsearch.dsl import ( DenseVector, Document, InnerDoc, Keyword, M, Search, connections, mapped_field, ) DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json" MODEL_NAME = "all-MiniLM-L6-v2" # initialize sentence tokenizer nltk.download("punkt_tab", quiet=True) # this will be the embedding model embedding_model: Any = None class Passage(InnerDoc): content: str embedding: List[float] = mapped_field(DenseVector()) class WorkplaceDoc(Document): class Index: name = "workplace_documents" name: str summary: str content: str created: datetime updated: Optional[datetime] url: str = mapped_field(Keyword(required=True)) category: str = mapped_field(Keyword(required=True)) passages: M[List[Passage]] = mapped_field(default=[]) @classmethod def get_embedding(cls, input: str) -> List[float]: global embedding_model if embedding_model is None: embedding_model = SentenceTransformer(MODEL_NAME) return cast(List[float], list(embedding_model.encode(input))) def clean(self) -> None: # split the content into sentences passages = cast(List[str], nltk.sent_tokenize(self.content)) # generate an embedding for each passage and save it as a nested document for passage in passages: self.passages.append( Passage(content=passage, embedding=self.get_embedding(passage)) ) def create() -> None: # create the index WorkplaceDoc._index.delete(ignore_unavailable=True) WorkplaceDoc.init() # download the data dataset = json.loads(urlopen(DATASET_URL).read()) # import the dataset for data in tqdm(dataset, desc="Indexing documents..."): doc = WorkplaceDoc( name=data["name"], summary=data["summary"], content=data["content"], created=data.get("created_on"), updated=data.get("updated_at"), url=data["url"], category=data["category"], ) doc.save() def search(query: str) -> Search[WorkplaceDoc]: return WorkplaceDoc.search().knn( field=WorkplaceDoc.passages.embedding, k=5, num_candidates=50, query_vector=list(WorkplaceDoc.get_embedding(query)), inner_hits={"size": 2}, ) def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Vector database with Elasticsearch") parser.add_argument( "--recreate-index", action="store_true", help="Recreate and populate the index" ) parser.add_argument( "--show-inner-hits", action="store_true", help="Show results for individual passages", ) parser.add_argument("query", action="store", help="The search query") return parser.parse_args() def main() -> None: args = parse_args() # initiate the default connection to elasticsearch connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]]) if args.recreate_index or not WorkplaceDoc._index.exists(): create() results = search(args.query) for hit in results: print( f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]" ) print(f"Summary: {hit.summary}") if args.show_inner_hits: for passage in hit.meta.inner_hits["passages"]: print(f" - [Score: {passage.meta.score}] {passage.content!r}") print("") # close the connection connections.get_connection().close() if __name__ == "__main__": main() 
python-elasticsearch-9.1.1/examples/fastapi-apm/000077500000000000000000000000001506101734100216715ustar00rootroot00000000000000python-elasticsearch-9.1.1/examples/fastapi-apm/README.md000066400000000000000000000053451506101734100231570ustar00rootroot00000000000000# Async Elasticsearch and Observability Example This example provides a `docker-compose` file based on the [Quick start development environment](https://www.elastic.co/guide/en/apm/get-started/current/quick-start-overview.html) for APM. It gets the default distributions of Elasticsearch, Kibana and APM Server up and running in Docker. The docker-compose file also includes a basic [FastAPI](https://fastapi.tiangolo.com/) application using [`AsyncElasticsearch` to execute requests asynchronously](https://elasticsearch-py.readthedocs.io/en/master/async.html) and a script to ping the web app with different endpoints. ## How to Run - [Install Docker Compose](https://docs.docker.com/compose/install/) - Run `docker-compose up` in the root folder of this example (`examples/fastapi-apm/`) ```bash $ cd examples/fastapi-apm $ docker-compose up ``` - Follow the steps on the full documentation [at elastic.co](https://www.elastic.co/guide/en/apm/get-started/current/quick-start-overview.html) to get APM set up in your Kibana instance. The following services will be available: - Kibana: http://localhost:5601 - Elasticsearch: http://localhost:9200 - APM Server: http://localhost:8200 - FastAPI app: http://localhost:9292 Use your web browser or `curl` against http://localhost:9292/ to check that everything is working. You should see a JSON response from `cluster.health`. The docker-compose file will also run a `ping.py` script. This script makes requests to the web app at irregular intervals to simulate real traffic and fill your APM dashboard with data, including errors. You can comment out the pinger container in `docker-compose.yml` if you want to have all the services running and test the different endpoints (or add your own) by yourself. ## Screenshot ![Kibana APM Dashboard](screenshot.jpg) ## FastAPI App Routes Once the app is running, you can open the following routes in your web browser or via `curl`. The responses are in JSON: * `/` - The root path returns the response from `cluster.health` * `/ingest` - Streams the first 100 documents from the games database into the `games` index with `async_streaming_bulk`. * `/search/{param}` - Returns search results for `param`. * `/error` - This route will trigger an error. * `/delete` - This route will delete all the data in the `games` index. * `/delete/{id}` - This route will delete a document with the given id from the `games` index. * `/update` - This route will update the `modified` field on some docs in the `games` index. * `/doc/{id}` - This route will return a document with a given ID from Elasticsearch. ## Data Source Data is based on a DB dump from February 25, 2020 of [TheGamesDB](https://thegamesdb.net/) game data: https://cdn.thegamesdb.net/json/database-latest.json python-elasticsearch-9.1.1/examples/fastapi-apm/app.py000066400000000000000000000051301506101734100230220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V under one or more agreements. # Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information import aiohttp import datetime import os from fastapi import FastAPI from fastapi.encoders import jsonable_encoder from elasticsearch import AsyncElasticsearch, NotFoundError from elasticsearch.helpers import async_streaming_bulk from elasticapm.contrib.starlette import ElasticAPM, make_apm_client apm = make_apm_client( {"SERVICE_NAME": "fastapi-app", "SERVER_URL": "http://apm-server:8200"} ) client = AsyncElasticsearch(os.environ["ELASTICSEARCH_HOSTS"]) app = FastAPI() app.add_middleware(ElasticAPM, client=apm) @app.on_event("shutdown") async def app_shutdown(): await client.close() async def download_games_db(): async with aiohttp.ClientSession() as http: url = "https://cdn.thegamesdb.net/json/database-latest.json" resp = await http.request("GET", url) for game in (await resp.json())["data"]["games"][:100]: yield game @app.get("/") async def index(): return await client.cluster.health() @app.get("/ingest") async def ingest(): if not (await client.indices.exists(index="games")): await client.indices.create(index="games") async for _ in async_streaming_bulk( client=client, index="games", actions=download_games_db() ): pass return {"status": "ok"} @app.get("/search/{query}") async def search(query): return await client.search( index="games", body={"query": {"multi_match": {"query": query}}} ) @app.get("/delete") async def delete(): return await client.delete_by_query(index="games", body={"query": {"match_all": {}}}) @app.get("/delete/{id}") async def delete_id(id): try: return await client.delete(index="games", id=id) except NotFoundError as e: return e.info, 404 @app.get("/update") async def update(): response = [] docs = await client.search( index="games", body={"query": {"multi_match": {"query": ""}}} ) now = datetime.datetime.utcnow() for doc in docs["hits"]["hits"]: response.append( await client.update( index="games", id=doc["_id"], body={"doc": {"modified": now}} ) ) return jsonable_encoder(response) @app.get("/error") async def error(): try: await client.delete(index="games", id="somerandomid") except NotFoundError as e: return e.info @app.get("/doc/{id}") async def get_doc(id): return await client.get(index="games", id=id) python-elasticsearch-9.1.1/examples/fastapi-apm/docker-compose.yml000066400000000000000000000051131506101734100253260ustar00rootroot00000000000000version: '2.2' services: apm-server: image: docker.elastic.co/apm/apm-server:7.6.2 depends_on: elasticsearch: condition: service_healthy kibana: condition: service_healthy cap_add: ["CHOWN", "DAC_OVERRIDE", "SETGID", "SETUID"] cap_drop: ["ALL"] ports: - 8200:8200 networks: - elastic command: > apm-server -e -E apm-server.rum.enabled=true -E setup.kibana.host=kibana:5601 -E setup.template.settings.index.number_of_replicas=0 -E apm-server.kibana.enabled=true -E apm-server.kibana.host=kibana:5601 -E output.elasticsearch.hosts=["elasticsearch:9200"] healthcheck: interval: 30s retries: 12 test: curl --write-out 'HTTP %{http_code}' --fail --silent --output /dev/null http://localhost:8200/ elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2 environment: - bootstrap.memory_lock=true - cluster.name=docker-cluster - cluster.routing.allocation.disk.threshold_enabled=false - discovery.type=single-node - ES_JAVA_OPTS=-XX:UseAVX=2 -Xms1g -Xmx1g ulimits: memlock: hard: -1 soft: -1 volumes: - esdata:/usr/share/elasticsearch/data ports: - 9200:9200 networks: - elastic healthcheck: interval: 30s retries: 10 test: curl -s 
  kibana:
    image: docker.elastic.co/kibana/kibana:7.6.2
    depends_on:
      elasticsearch:
        condition: service_healthy
    environment:
      ELASTICSEARCH_URL: http://elasticsearch:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
    - 5601:5601
    networks:
    - elastic
    healthcheck:
      interval: 30s
      retries: 20
      test: curl --write-out 'HTTP %{http_code}' --fail --silent --output /dev/null http://localhost:5601/api/status

  app:
    container_name: app
    build:
      dockerfile: ${PWD}/dockerfiles/Dockerfile.app
      context: ${PWD}
    depends_on:
    - apm-server
    environment:
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
    - 9292:9292
    networks:
    - elastic
    healthcheck:
      interval: 30s
      retries: 20
      test: curl --write-out 'HTTP %{http_code}' --fail --silent --output /dev/null http://localhost:9292/

  ping:
    container_name: ping
    build:
      dockerfile: ${PWD}/dockerfiles/Dockerfile.ping
      context: ${PWD}
    depends_on:
    - app
    networks:
    - elastic

volumes:
  esdata:
    driver: local

networks:
  elastic:
    driver: bridge
python-elasticsearch-9.1.1/examples/fastapi-apm/dockerfiles/000077500000000000000000000000001506101734100241635ustar00rootroot00000000000000python-elasticsearch-9.1.1/examples/fastapi-apm/dockerfiles/Dockerfile.app000066400000000000000000000003461506101734100267370ustar00rootroot00000000000000FROM python:3.9

EXPOSE 9292
WORKDIR /

COPY requirements.txt /
RUN python -m pip install \
    --no-cache \
    --disable-pip-version-check \
    -r /requirements.txt

COPY app.py /
CMD uvicorn app:app --host=0.0.0.0 --port=9292
python-elasticsearch-9.1.1/examples/fastapi-apm/dockerfiles/Dockerfile.ping000066400000000000000000000002271506101734100271120ustar00rootroot00000000000000FROM python:3.9

WORKDIR /

RUN python -m pip install \
    --no-cache \
    --disable-pip-version-check \
    urllib3

COPY ping.py /
CMD python ping.py
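The app container simply runs uvicorn (see `Dockerfile.app` above), so the FastAPI app can also be started outside Docker against your own cluster. A rough sketch, assuming a local Elasticsearch on port 9200 (note that the APM server URL is hard-coded to `http://apm-server:8200` in `app.py`, so traces are only reported when that hostname resolves):

```bash
pip install -r requirements.txt
# app.py reads this environment variable at import time
export ELASTICSEARCH_HOSTS=http://localhost:9200
uvicorn app:app --host 0.0.0.0 --port 9292
```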
python-elasticsearch-9.1.1/examples/fastapi-apm/ping.py000066400000000000000000000014421506101734100232010ustar00rootroot00000000000000# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information

import random
import urllib3
import time

endpoints = [
    "http://app:9292/",
    "http://app:9292/ingest",
    "http://app:9292/search/mario",
    "http://app:9292/search/sonic",
    "http://app:9292/search/donkeykong",
    "http://app:9292/search/bubsy",
    "http://app:9292/delete",
    "http://app:9292/update",
    "http://app:9292/error",
]


def main():
    http = urllib3.PoolManager()
    while True:
        url = random.choice(endpoints)
        try:
            http.request("GET", url, preload_content=True)
        except Exception:
            pass
        time.sleep(1)


main()
python-elasticsearch-9.1.1/examples/fastapi-apm/requirements.in000066400000000000000000000000611506101734100247410ustar00rootroot00000000000000fastapi
elasticsearch[async]
elastic-apm
uvicorn
python-elasticsearch-9.1.1/examples/fastapi-apm/requirements.txt000066400000000000000000000005071506101734100251570ustar00rootroot00000000000000aiohttp==3.7.4.post0
asgiref==3.3.4
async-timeout==3.0.1
attrs==21.2.0
certifi==2023.7.22
chardet==4.0.0
click==8.0.1
elastic-apm==6.2.1
elasticsearch[async]==7.13.1
fastapi==0.65.1
h11==0.12.0
idna==3.2
multidict==5.1.0
pydantic==1.8.2
starlette==0.14.2
typing-extensions==3.10.0.0
urllib3==1.26.5
uvicorn==0.14.0
yarl==1.6.3
python-elasticsearch-9.1.1/examples/fastapi-apm/screenshot.jpg000066400000000000000000003105431506101734100245560ustar00rootroot00000000000000[binary JPEG data omitted: screenshot of the Kibana APM dashboard, referenced from the README above]
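The `noxfile.py` below defines the project's automation sessions: `test` (parametrized over Python 3.9 through 3.13), `test_otel`, `format`, `lint`, and `docs`. As a hypothetical usage sketch with the standard nox CLI, where parametrized sessions are addressed as `name-python`:

```bash
pip install nox
nox -s format      # regenerate generated code and auto-format the sources
nox -s lint        # style checks plus strict mypy passes
nox -s test-3.13   # run the test suite under Python 3.13
```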
python-elasticsearch-9.1.1/noxfile.py000066400000000000000000000104471506101734100176730ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import os

import nox

SOURCE_DIR = os.path.dirname(os.path.abspath(__file__))
SOURCE_FILES = (
    "docs/sphinx/conf.py",
    "noxfile.py",
    "elasticsearch/",
    "test_elasticsearch/",
    "utils/",
)

# Allow building aiohttp when no wheels are available (eg. for recent Python versions)
INSTALL_ENV = {"AIOHTTP_NO_EXTENSIONS": "1"}


def pytest_argv():
    junit_xml = os.path.join(SOURCE_DIR, "junit", "elasticsearch-py-junit.xml")
    return [
        "pytest",
        "--cov-report=term-missing",
        "--cov=elasticsearch",
        f"--junitxml={junit_xml}",
        "--log-level=DEBUG",
        "--cache-clear",
        "-vv",
    ]


@nox.session(python=["3.9", "3.10", "3.11", "3.12", "3.13"])
def test(session):
    session.install("-e", ".[dev]", env=INSTALL_ENV, silent=False)

    session.run(*pytest_argv(), *session.posargs)


@nox.session(python=["3.9", "3.13"])
def test_otel(session):
    session.install(
        ".[dev]",
        "opentelemetry-api",
        "opentelemetry-sdk",
        silent=False,
    )

    argv = pytest_argv() + ["-m", "otel"]
    session.run(*argv, env={"TEST_WITH_OTEL": "1"})


@nox.session()
def format(session):
    session.install(
        ".", "black~=25.0", "isort~=6.0", "flynt", "unasync>=0.6.0", "jinja2"
    )

    session.run("python", "utils/run-unasync.py")
    session.run("python", "utils/run-unasync-dsl.py")
    session.run("python", "utils/dsl-generator.py", env={"PYTHONPATH": "./"})
    session.run("isort", "--profile=black", *SOURCE_FILES)
    session.run("flynt", *SOURCE_FILES)
    session.run("black", *SOURCE_FILES)
    session.run("python", "utils/license-headers.py", "fix", *SOURCE_FILES)


@nox.session()
def lint(session):
    # Check that importing the client still works without optional dependencies
    session.install(".", env=INSTALL_ENV)
    session.run("python", "-c", "from elasticsearch import Elasticsearch")
    session.run("python", "-c", "from elasticsearch._otel import OpenTelemetry")

    session.install(
        "flake8",
        "black~=25.0",
        "mypy",
        "isort~=6.0",
        "types-requests",
        "types-python-dateutil",
        "unasync>=0.6.0",
    )
    session.run("isort", "--check", "--profile=black", *SOURCE_FILES)
    session.run("black", "--check", *SOURCE_FILES)
    session.run("python", "utils/run-unasync.py", "--check")
    session.run("python", "utils/run-unasync-dsl.py", "--check")
session.run("flake8", *SOURCE_FILES) session.run("python", "utils/license-headers.py", "check", *SOURCE_FILES) session.install(".[async,requests,orjson,pyarrow,vectorstore_mmr]", env=INSTALL_ENV) # Run mypy on the package, the type examples and the DSL examples session.run( "mypy", "--strict", "--implicit-reexport", "--explicit-package-bases", "--show-error-codes", "--enable-error-code=ignore-without-code", "elasticsearch/", "test_elasticsearch/test_types/", "examples/dsl/", ) # Make sure we don't require aiohttp to be installed for users to # receive type hint information from mypy. session.run("python", "-m", "pip", "uninstall", "--yes", "aiohttp") session.run( "mypy", "--strict", "--implicit-reexport", "--explicit-package-bases", "--show-error-codes", "elasticsearch/", "test_elasticsearch/test_types/sync_types.py", ) @nox.session() def docs(session): session.install(".[docs]") session.run( "sphinx-build", "-W", "docs/sphinx/", "docs/sphinx/_build", "-b", "html" ) python-elasticsearch-9.1.1/pyproject.toml000066400000000000000000000065701506101734100205750ustar00rootroot00000000000000[build-system] requires = ["hatchling"] build-backend = "hatchling.build" [project] name = "elasticsearch" description = "Python client for Elasticsearch" readme = "README.md" license = "Apache-2.0" requires-python = ">=3.9" authors = [ { name = "Elastic Client Library Maintainers", email = "client-libs@elastic.co" }, ] maintainers = [ { name = "Elastic Client Library Maintainers", email = "client-libs@elastic.co" }, ] classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ] keywords = [ "elasticsearch", "elastic", "kibana", "mapping", "REST", "search", "client", "index", ] dynamic = ["version"] dependencies = [ "elastic-transport>=9.1.0,<10", "python-dateutil", "typing-extensions", ] [project.optional-dependencies] async = ["aiohttp>=3,<4"] requests = ["requests>=2.4.0, !=2.32.2, <3.0.0"] orjson = ["orjson>=3"] pyarrow = ["pyarrow>=1"] # Maximal Marginal Relevance (MMR) for search results vectorstore_mmr = ["numpy>=1", "simsimd>=3"] dev = [ "requests>=2, <3", "aiohttp", "pytest", "pytest-cov", "pytest-mock", "pytest-asyncio", "coverage", "jinja2", "python-dateutil", "unasync", "pyyaml>=5.4", "isort", "black", "twine", "build", "nox", "orjson", "numpy", "simsimd", "pyarrow", "pandas", "mapbox-vector-tile", "jinja2", "tqdm", "mypy", "pyright", "types-python-dateutil", "types-tqdm", ] docs = [ "sphinx", "sphinx-rtd-theme>=2.0", "sphinx-autodoc-typehints" ] [project.urls] Documentation = "https://elasticsearch-py.readthedocs.io/" Homepage = "https://github.com/elastic/elasticsearch-py" "Issue Tracker" = "https://github.com/elastic/elasticsearch-py/issues" "Source Code" = "https://github.com/elastic/elasticsearch-py" [tool.hatch.version] path = "elasticsearch/_version.py" pattern = "__versionstr__ = \"(?P[^']+)\"" [tool.hatch.build.targets.sdist] include = [ "/elasticsearch", "/CHANGELOG.md", "/CONTRIBUTING.md", "/LICENSE", "/NOTICE", "/README.md", "/setup.cfg", "/docs/sphinx", ] 
[tool.hatch.build.targets.wheel] packages = ["elasticsearch"] [tool.pytest.ini_options] junit_family = "legacy" xfail_strict = true markers = [ "otel", "sync: mark a test as performing I/O without asyncio.", ] filterwarnings = [ "ignore:Legacy index templates are deprecated in favor of composable templates.:elasticsearch.exceptions.ElasticsearchWarning", "ignore:datetime.datetime.utcfromtimestamp() is deprecated and scheduled for removal in a future version..*:DeprecationWarning", "default:enable_cleanup_closed ignored.*:DeprecationWarning", ] [tool.isort] profile = "black" [tool.coverage.report] exclude_lines = [ "raise NotImplementedError*", ] [tool.mypy] ignore_missing_imports = true python-elasticsearch-9.1.1/setup.cfg000066400000000000000000000000651506101734100174730ustar00rootroot00000000000000[flake8] ignore = E203, E266, E501, W503, E704, E741 python-elasticsearch-9.1.1/test_elasticsearch/000077500000000000000000000000001506101734100215225ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/__init__.py000066400000000000000000000014231506101734100236330ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/conftest.py000066400000000000000000000025601506101734100237240ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from typing import Tuple import pytest from elasticsearch import Elasticsearch from .utils import CA_CERTS, es_url, es_version @pytest.fixture(scope="session") def elasticsearch_url(): try: return es_url() except RuntimeError as e: pytest.skip(str(e)) @pytest.fixture(scope="session") def ca_certs(): return CA_CERTS @pytest.fixture(scope="session") def elasticsearch_version(elasticsearch_url, ca_certs) -> Tuple[int, ...]: """Returns the version of the current Elasticsearch cluster""" return es_version(Elasticsearch(elasticsearch_url, ca_certs=ca_certs)) python-elasticsearch-9.1.1/test_elasticsearch/test_async/000077500000000000000000000000001506101734100236765ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_async/__init__.py000066400000000000000000000014231506101734100260070ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/000077500000000000000000000000001506101734100262435ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/__init__.py000066400000000000000000000014231506101734100303540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/conftest.py000066400000000000000000000033351506101734100304460ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. import pytest import pytest_asyncio import elasticsearch from ...utils import CA_CERTS, wipe_cluster pytestmark = pytest.mark.asyncio @pytest_asyncio.fixture(scope="function") async def async_client_factory(elasticsearch_url): if not hasattr(elasticsearch, "AsyncElasticsearch"): pytest.skip("test requires 'AsyncElasticsearch' and aiohttp to be installed") # Unfortunately the asyncio client needs to be rebuilt every # test execution due to how pytest-asyncio manages # event loops (one per test!) client = None try: client = elasticsearch.AsyncElasticsearch(elasticsearch_url, ca_certs=CA_CERTS) yield client finally: if client: await client.close() @pytest.fixture(scope="function") def async_client(async_client_factory): try: yield async_client_factory finally: # Wipe the cluster clean after every test execution. wipe_cluster(async_client_factory) python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/test_clients.py000066400000000000000000000035501506101734100313200ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest pytestmark = pytest.mark.asyncio @pytest.mark.parametrize("kwargs", [{"body": {"text": "привет"}}, {"text": "привет"}]) async def test_indices_analyze_unicode(async_client, kwargs): resp = await async_client.indices.analyze(**kwargs) assert resp == { "tokens": [ { "end_offset": 6, "position": 0, "start_offset": 0, "token": "привет", "type": "", } ] } async def test_bulk_works_with_string_body(async_client): docs = '{ "index" : { "_index" : "bulk_test_index", "_id" : "1" } }\n{"answer": 42}' response = await async_client.bulk(body=docs) assert response["errors"] is False assert len(response["items"]) == 1 async def test_bulk_works_with_bytestring_body(async_client): docs = ( b'{ "index" : { "_index" : "bulk_test_index", "_id" : "2" } }\n{"answer": 42}' ) response = await async_client.bulk(body=docs) assert response["errors"] is False assert len(response["items"]) == 1 python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/test_helpers.py000066400000000000000000001134331506101734100313230ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import asyncio import logging from datetime import datetime, timedelta, timezone from unittest.mock import MagicMock, call, patch import pytest import pytest_asyncio from elastic_transport import ApiResponseMeta, ObjectApiResponse from elasticsearch import helpers from elasticsearch.exceptions import ApiError from elasticsearch.helpers import ScanError pytestmark = [pytest.mark.asyncio] class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): return super().__call__(*args, **kwargs) def __await__(self): return self().__await__() class FailingBulkClient: def __init__( self, client, fail_at=(2,), fail_with=ApiError( message="Error!", body={}, meta=ApiResponseMeta( status=599, headers={}, http_version="1.1", duration=0, node=None ), ), ): self.client = client self._called = 0 self._fail_at = fail_at self.transport = client.transport self._fail_with = fail_with async def bulk(self, *args, **kwargs): self._called += 1 if self._called in self._fail_at: raise self._fail_with return await self.client.bulk(*args, **kwargs) def options(self, **_): return self class TestStreamingBulk: async def test_actions_remain_unchanged(self, async_client): actions = [{"_id": 1}, {"_id": 2}] async for ok, item in helpers.async_streaming_bulk( async_client, actions, index="test-index" ): assert ok assert [{"_id": 1}, {"_id": 2}] == actions async def test_all_documents_get_inserted(self, async_client): docs = [{"answer": x, "_id": x} for x in range(100)] async for ok, item in helpers.async_streaming_bulk( async_client, docs, index="test-index", refresh=True ): assert ok assert 100 == (await async_client.count(index="test-index"))["count"] assert {"answer": 42} == (await async_client.get(index="test-index", id=42))[ "_source" ] async def test_documents_data_types(self, async_client): async def async_gen(): for x in range(100): await asyncio.sleep(0) yield {"answer": x, "_id": x} def sync_gen(): for x in range(100): yield {"answer": x, "_id": x} async for ok, item in helpers.async_streaming_bulk( async_client, async_gen(), index="test-index", refresh=True ): assert ok assert 100 == (await async_client.count(index="test-index"))["count"] assert {"answer": 42} == (await async_client.get(index="test-index", id=42))[ "_source" ] await async_client.delete_by_query( index="test-index", body={"query": {"match_all": {}}} ) async for ok, item in helpers.async_streaming_bulk( async_client, sync_gen(), index="test-index", refresh=True ): assert ok assert 100 == (await async_client.count(index="test-index"))["count"] assert {"answer": 42} == (await async_client.get(index="test-index", id=42))[ "_source" ] async def test_all_errors_from_chunk_are_raised_on_failure(self, async_client): await async_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) try: async for ok, item in helpers.async_streaming_bulk( async_client, [{"a": "b"}, {"a": "c"}], index="i", raise_on_error=True ): assert ok except helpers.BulkIndexError as e: assert 2 == len(e.errors) else: assert False, "exception should have been raised" async def 
test_different_op_types(self, async_client): await async_client.index(index="i", id=45, body={}) await async_client.index(index="i", id=42, body={}) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_op_type": "delete", "_index": "i", "_id": 45}, {"_op_type": "update", "_index": "i", "_id": 42, "doc": {"answer": 42}}, ] async for ok, item in helpers.async_streaming_bulk(async_client, docs): assert ok assert not await async_client.exists(index="i", id=45) assert {"answer": 42} == (await async_client.get(index="i", id=42))["_source"] assert {"f": "v"} == (await async_client.get(index="i", id=47))["_source"] async def test_transport_error_can_becaught(self, async_client): failing_client = FailingBulkClient(async_client) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = [ x async for x in helpers.async_streaming_bulk( failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, ) ] assert 3 == len(results) assert [True, False, True] == [r[0] for r in results] exc = results[1][1]["index"].pop("exception") assert isinstance(exc, ApiError) assert 599 == exc.status_code assert { "index": { "_index": "i", "_id": 45, "data": {"f": "v"}, "error": "ApiError(599, 'Error!')", "status": 599, } } == results[1][1] async def test_rejected_documents_are_retried(self, async_client): failing_client = FailingBulkClient( async_client, fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = [ x async for x in helpers.async_streaming_bulk( failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0, ) ] assert 3 == len(results) assert [True, True, True] == [r[0] for r in results] await async_client.indices.refresh(index="i") res = await async_client.search(index="i") assert {"value": 3, "relation": "eq"} == res["hits"]["total"] assert 4 == failing_client._called async def test_rejected_documents_are_retried_at_most_max_retries_times( self, async_client ): failing_client = FailingBulkClient( async_client, fail_at=(1, 2), fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = [ x async for x in helpers.async_streaming_bulk( failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0, ) ] assert 3 == len(results) assert [False, True, True] == [r[0] for r in results] await async_client.indices.refresh(index="i") res = await async_client.search(index="i") assert {"value": 2, "relation": "eq"} == res["hits"]["total"] assert 4 == failing_client._called async def test_transport_error_is_raised_with_max_retries(self, async_client): failing_client = FailingBulkClient( async_client, fail_at=(1, 2, 3, 4), fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) async def streaming_bulk(): results = [ x async for x in helpers.async_streaming_bulk( failing_client, [{"a": 42}, {"a": 39}], raise_on_exception=True, max_retries=3, initial_backoff=0, ) ] return results with 
pytest.raises(ApiError): await streaming_bulk() assert 4 == failing_client._called async def test_connection_timeout_is_retried_with_retry_status_callback( self, async_client ): failing_client = FailingBulkClient( async_client, fail_with=ApiError( message="Connection timed out!", body={}, meta=ApiResponseMeta( status=522, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = [ x async for x in helpers.async_streaming_bulk( failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, retry_on_status=522, max_retries=1, initial_backoff=0, ) ] assert 3 == len(results) assert [True, True, True] == [r[0] for r in results] await async_client.indices.refresh(index="i") res = await async_client.search(index="i") assert {"value": 3, "relation": "eq"} == res["hits"]["total"] assert 4 == failing_client._called class TestBulk: async def test_bulk_works_with_single_item(self, async_client): docs = [{"answer": 42, "_id": 1}] success, failed = await helpers.async_bulk( async_client, docs, index="test-index", refresh=True ) assert 1 == success assert not failed assert 1 == (await async_client.count(index="test-index"))["count"] assert {"answer": 42} == (await async_client.get(index="test-index", id=1))[ "_source" ] async def test_all_documents_get_inserted(self, async_client): docs = [{"answer": x, "_id": x} for x in range(100)] success, failed = await helpers.async_bulk( async_client, docs, index="test-index", refresh=True ) assert 100 == success assert not failed assert 100 == (await async_client.count(index="test-index"))["count"] assert {"answer": 42} == (await async_client.get(index="test-index", id=42))[ "_source" ] async def test_stats_only_reports_numbers(self, async_client): docs = [{"answer": x} for x in range(100)] success, failed = await helpers.async_bulk( async_client, docs, index="test-index", refresh=True, stats_only=True ) assert 100 == success assert 0 == failed assert 100 == (await async_client.count(index="test-index"))["count"] async def test_errors_are_reported_correctly(self, async_client): await async_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) success, failed = await helpers.async_bulk( async_client, [{"a": 42}, {"a": "c", "_id": 42}], index="i", raise_on_error=False, ) assert 1 == success assert 1 == len(failed) error = failed[0] assert "42" == error["index"]["_id"] assert "i" == error["index"]["_index"] print(error["index"]["error"]) assert error["index"]["error"]["type"] == "document_parsing_exception" async def test_error_is_raised(self, async_client): await async_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) with pytest.raises(helpers.BulkIndexError): await helpers.async_bulk(async_client, [{"a": 42}, {"a": "c"}], index="i") async def test_ignore_error_if_raised(self, async_client): # ignore the status code 400 in tuple await helpers.async_bulk( async_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=(400,) ) # ignore the status code 400 in list await helpers.async_bulk( async_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=[ 400, ], ) # ignore the status code 400 await helpers.async_bulk( async_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=400 ) # ignore only the status code in 
the `ignore_status` argument with pytest.raises(helpers.BulkIndexError): await helpers.async_bulk( async_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=(444,) ) # ignore transport error exception failing_client = FailingBulkClient(async_client) await helpers.async_bulk( failing_client, [{"a": 42}], index="i", ignore_status=(599,) ) async def test_errors_are_collected_properly(self, async_client): await async_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) success, failed = await helpers.async_bulk( async_client, [{"a": 42}, {"a": "c"}], index="i", stats_only=True, raise_on_error=False, ) assert 1 == success assert 1 == failed class MockScroll: def __init__(self): self.calls = [] async def __call__(self, *args, **kwargs): self.calls.append((args, kwargs)) if len(self.calls) == 1: return ObjectApiResponse( body={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": [{"scroll_data": 42}]}, }, meta=None, ) elif len(self.calls) == 2: return ObjectApiResponse( body={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": []}, }, meta=None, ) else: raise Exception("no more responses") class MockResponse: def __init__(self, resp): self.resp = resp async def __call__(self, *args, **kwargs): return self.resp def __await__(self): return self().__await__() @pytest_asyncio.fixture(scope="function") async def scan_teardown(async_client): yield await async_client.clear_scroll(scroll_id="_all") class TestScan: async def test_order_can_be_preserved(self, async_client, scan_teardown): bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index", "_id": x}}) bulk.append({"answer": x, "correct": x == 42}) await async_client.bulk(operations=bulk, refresh=True) docs = [ doc async for doc in helpers.async_scan( async_client, index="test_index", query={"sort": "answer"}, preserve_order=True, ) ] assert 100 == len(docs) assert list(map(str, range(100))) == list(d["_id"] for d in docs) assert list(range(100)) == list(d["_source"]["answer"] for d in docs) async def test_all_documents_are_read(self, async_client, scan_teardown): bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index", "_id": x}}) bulk.append({"answer": x, "correct": x == 42}) await async_client.bulk(operations=bulk, refresh=True) docs = [ x async for x in helpers.async_scan(async_client, index="test_index", size=2) ] assert 100 == len(docs) assert set(map(str, range(100))) == {d["_id"] for d in docs} assert set(range(100)) == {d["_source"]["answer"] for d in docs} async def test_scroll_error(self, async_client, scan_teardown): bulk = [] for x in range(4): bulk.append({"index": {"_index": "test_index"}}) bulk.append({"value": x}) await async_client.bulk(operations=bulk, refresh=True) with ( patch.object(async_client, "options", return_value=async_client), patch.object(async_client, "scroll", MockScroll()), ): data = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, raise_on_error=False, clear_scroll=False, ) ] assert len(data) == 3 assert data[-1] == {"scroll_data": 42} with ( patch.object(async_client, "options", return_value=async_client), patch.object(async_client, "scroll", MockScroll()), ): with pytest.raises(ScanError): data = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, raise_on_error=True, clear_scroll=False, ) ] assert len(data) == 3 assert 
data[-1] == {"scroll_data": 42} async def test_initial_search_error(self, async_client, scan_teardown): with ( patch.object(async_client, "options", return_value=async_client), patch.object(async_client, "clear_scroll", new_callable=AsyncMock), ): with patch.object( async_client, "search", MockResponse( ObjectApiResponse( body={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, meta=None, ) ), ): with patch.object(async_client, "scroll", MockScroll()): data = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, raise_on_error=False, ) ] assert data == [{"search_data": 1}, {"scroll_data": 42}] with patch.object( async_client, "search", MockResponse( ObjectApiResponse( body={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, meta=None, ) ), ): with patch.object(async_client, "scroll", MockScroll()) as mock_scroll: with pytest.raises(ScanError): data = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, raise_on_error=True, ) ] assert data == [{"search_data": 1}] assert mock_scroll.calls == [] async def test_no_scroll_id_fast_route(self, async_client, scan_teardown): with ( patch.object(async_client, "options", return_value=async_client), patch.object(async_client, "scroll") as scroll_mock, patch.object( async_client, "search", MockResponse(ObjectApiResponse(body={"no": "_scroll_id"}, meta=None)), ), patch.object(async_client, "clear_scroll") as clear_mock, ): data = [ x async for x in helpers.async_scan(async_client, index="test_index") ] assert data == [] scroll_mock.assert_not_called() clear_mock.assert_not_called() async def test_logger( self, caplog: pytest.LogCaptureFixture, async_client, scan_teardown ): caplog.set_level(logging.WARNING, logger="elasticsearch.helpers") bulk = [] for x in range(4): bulk.append({"index": {"_index": "test_index"}}) bulk.append({"value": x}) await async_client.bulk(operations=bulk, refresh=True) with ( patch.object(async_client, "options", return_value=async_client), patch.object(async_client, "scroll", MockScroll()), ): _ = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, raise_on_error=False, clear_scroll=False, ) ] assert caplog.messages == [ "Scroll request has only succeeded on 4 (+0 skipped) shards out of 5." ] caplog.clear() with ( patch.object(async_client, "options", return_value=async_client), patch.object(async_client, "scroll", MockScroll()), ): with pytest.raises(ScanError): _ = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, raise_on_error=True, clear_scroll=False, ) ] assert caplog.messages == [ "Scroll request has only succeeded on 4 (+0 skipped) shards out of 5." 
] async def test_clear_scroll(self, async_client, scan_teardown): bulk = [] for x in range(4): bulk.append({"index": {"_index": "test_index"}}) bulk.append({"value": x}) await async_client.bulk(operations=bulk, refresh=True) with ( patch.object(async_client, "options", return_value=async_client), patch.object( async_client, "clear_scroll", wraps=async_client.clear_scroll ) as spy, ): _ = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2 ) ] spy.assert_called_once() spy.reset_mock() _ = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, clear_scroll=True ) ] spy.assert_called_once() spy.reset_mock() _ = [ x async for x in helpers.async_scan( async_client, index="test_index", size=2, clear_scroll=False ) ] spy.assert_not_called() @pytest.mark.parametrize( "kwargs", [ {"api_key": ("name", "value")}, {"http_auth": ("username", "password")}, {"headers": {"custom", "header"}}, ], ) async def test_scan_auth_kwargs_forwarded( self, async_client, scan_teardown, kwargs ): with ( patch.object(async_client, "options", return_value=async_client) as options, patch.object( async_client, "search", return_value=MockResponse( ObjectApiResponse( body={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, meta=None, ) ), ), ): with patch.object( async_client, "scroll", return_value=MockResponse( ObjectApiResponse( body={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": []}, }, meta=None, ) ), ): with patch.object( async_client, "clear_scroll", return_value=MockResponse({}) ): data = [ x async for x in helpers.async_scan( async_client, index="test_index", **kwargs ) ] assert data == [{"search_data": 1}] if "http_auth" in kwargs: kwargs = {"basic_auth": kwargs.pop("http_auth")} assert options.call_args_list == [ call(request_timeout=None, **kwargs), call(ignore_status=404), ] async def test_scan_auth_kwargs_favor_scroll_kwargs_option( self, async_client, scan_teardown ): with ( patch.object(async_client, "options", return_value=async_client) as options, patch.object( async_client, "search", return_value=MockResponse( ObjectApiResponse( body={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, meta=None, ) ), ), ): with patch.object( async_client, "scroll", return_value=MockResponse( ObjectApiResponse( body={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": []}, }, meta=None, ) ), ): with patch.object( async_client, "clear_scroll", return_value=MockResponse({}) ): data = [ x async for x in helpers.async_scan( async_client, index="test_index", headers={"not scroll": "kwargs"}, scroll_kwargs={ "headers": {"scroll": "kwargs"}, "sort": "asc", }, ) ] assert data == [{"search_data": 1}] # Assert that we see 'scroll_kwargs' options used instead of 'kwargs' assert options.call_args_list == [ call(request_timeout=None, headers={"not scroll": "kwargs"}), call(headers={"scroll": "kwargs"}), call(ignore_status=404), ] assert async_client.search.call_args_list == [ call(sort="_doc", index="test_index", scroll="5m", size=1000) ] assert async_client.scroll.call_args_list == [ call(scroll_id="scroll_id", scroll="5m", sort="asc") ] assert async_client.clear_scroll.call_args_list == [ call(scroll_id="scroll_id") ] @pytest.mark.parametrize( "scan_kwargs", [ {"from": 1}, {"from_": 1}, {"query": {"from": 1}}, {"query": {"from_": 
1}}, {"query": {"query": {"match_all": {}}}, "from": 1}, {"query": {"query": {"match_all": {}}}, "from_": 1}, ], ) async def test_scan_from_keyword_is_aliased(async_client, scan_kwargs): with ( patch.object(async_client, "options", return_value=async_client), patch.object( async_client, "search", return_value=MockResponse( ObjectApiResponse( body={ "_scroll_id": "dummy_id", "_shards": {"successful": 5, "total": 5}, "hits": {"hits": []}, }, meta=None, ) ), ) as search_mock, patch.object(async_client, "clear_scroll", return_value=MockResponse(None)), ): [ x async for x in helpers.async_scan( async_client, index="test_index", **scan_kwargs ) ] assert search_mock.call_args[1]["from_"] == 1 assert "from" not in search_mock.call_args[1] @pytest_asyncio.fixture(scope="function") async def reindex_setup(async_client): bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index", "_id": x}}) bulk.append( { "answer": x, "correct": x == 42, "type": "answers" if x % 2 == 0 else "questions", } ) await async_client.bulk(operations=bulk, refresh=True) yield class TestReindex: async def test_reindex_passes_kwargs_to_scan_and_bulk( self, async_client, reindex_setup ): await helpers.async_reindex( async_client, "test_index", "prod_index", scan_kwargs={"q": "type:answers"}, bulk_kwargs={"refresh": True}, ) assert await async_client.indices.exists(index="prod_index") assert ( 50 == (await async_client.count(index="prod_index", q="type:answers"))["count"] ) assert {"answer": 42, "correct": True, "type": "answers"} == ( await async_client.get(index="prod_index", id=42) )["_source"] async def test_reindex_accepts_a_query(self, async_client, reindex_setup): await helpers.async_reindex( async_client, "test_index", "prod_index", query={"query": {"bool": {"filter": {"term": {"type": "answers"}}}}}, ) await async_client.indices.refresh() assert await async_client.indices.exists(index="prod_index") assert ( 50 == (await async_client.count(index="prod_index", q="type:answers"))["count"] ) assert {"answer": 42, "correct": True, "type": "answers"} == ( await async_client.get(index="prod_index", id=42) )["_source"] async def test_all_documents_get_moved(self, async_client, reindex_setup): await helpers.async_reindex(async_client, "test_index", "prod_index") await async_client.indices.refresh() assert await async_client.indices.exists(index="prod_index") assert ( 50 == (await async_client.count(index="prod_index", q="type:questions"))[ "count" ] ) assert ( 50 == (await async_client.count(index="prod_index", q="type:answers"))["count"] ) assert {"answer": 42, "correct": True, "type": "answers"} == ( await async_client.get(index="prod_index", id=42) )["_source"] @pytest_asyncio.fixture(scope="function") async def parent_reindex_setup(async_client): body = { "settings": {"number_of_shards": 1, "number_of_replicas": 0}, "mappings": { "properties": { "question_answer": { "type": "join", "relations": {"question": "answer"}, } } }, } await async_client.indices.create(index="test-index", body=body) await async_client.indices.create(index="real-index", body=body) await async_client.index( index="test-index", id=42, body={"question_answer": "question"} ) await async_client.index( index="test-index", id=47, routing=42, body={"some": "data", "question_answer": {"name": "answer", "parent": 42}}, ) await async_client.indices.refresh(index="test-index") class TestParentChildReindex: async def test_children_are_reindexed_correctly( self, async_client, parent_reindex_setup ): await helpers.async_reindex(async_client, 
"test-index", "real-index") q = await async_client.get(index="real-index", id=42) assert { "_id": "42", "_index": "real-index", "_primary_term": 1, "_seq_no": 0, "_source": {"question_answer": "question"}, "_version": 1, "found": True, } == q q = await async_client.get(index="test-index", id=47, routing=42) assert { "_routing": "42", "_id": "47", "_index": "test-index", "_primary_term": 1, "_seq_no": 1, "_source": { "some": "data", "question_answer": {"name": "answer", "parent": 42}, }, "_version": 1, "found": True, } == q @pytest_asyncio.fixture(scope="function") async def reindex_data_stream_setup(async_client): dt = datetime.now(tz=timezone.utc) bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index_stream", "_id": x}}) bulk.append( { "answer": x, "correct": x == 42, "type": "answers" if x % 2 == 0 else "questions", "@timestamp": (dt - timedelta(days=x)).isoformat(), } ) await async_client.bulk(operations=bulk, refresh=True) await async_client.indices.put_index_template( name="my-index-template", body={ "index_patterns": ["py-*-*"], "data_stream": {}, }, ) await async_client.indices.create_data_stream(name="py-test-stream") await async_client.indices.refresh() yield class TestAsyncDataStreamReindex: @pytest.mark.parametrize("op_type", [None, "create"]) async def test_reindex_index_datastream( self, op_type, async_client, reindex_data_stream_setup ): await helpers.async_reindex( async_client, source_index="test_index_stream", target_index="py-test-stream", scan_kwargs={"q": "type:answers"}, bulk_kwargs={"refresh": True}, op_type=op_type, ) # await async_client.indices.refresh() assert await async_client.indices.exists(index="py-test-stream") assert ( 50 == (await async_client.count(index="py-test-stream", q="type:answers"))[ "count" ] ) async def test_reindex_index_datastream_op_type_index( self, async_client, reindex_data_stream_setup ): with pytest.raises( ValueError, match="Data streams must have 'op_type' set to 'create'" ): await helpers.async_reindex( async_client, source_index="test_index_stream", target_index="py-test-stream", query={"query": {"bool": {"filter": {"term": {"type": "answers"}}}}}, op_type="_index", ) python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/test_mapbox_vector_tile.py000066400000000000000000000107721506101734100335500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import pytest import pytest_asyncio from elasticsearch import RequestError pytestmark = pytest.mark.asyncio @pytest_asyncio.fixture(scope="function") async def mvt_setup(async_client): await async_client.indices.create( index="museums", body={ "mappings": { "properties": { "location": {"type": "geo_point"}, "name": {"type": "keyword"}, "price": {"type": "long"}, "included": {"type": "boolean"}, } } }, ) await async_client.bulk( index="museums", body=[ {"index": {"_id": "1"}}, { "location": "52.374081,4.912350", "name": "NEMO Science Museum", "price": 1750, "included": True, }, {"index": {"_id": "2"}}, { "location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis", "price": 1500, "included": False, }, {"index": {"_id": "3"}}, { "location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum", "price": 1650, "included": True, }, {"index": {"_id": "4"}}, { "location": "52.371667,4.914722", "name": "Amsterdam Centre for Architecture", "price": 0, "included": True, }, ], refresh=True, ) async def test_mapbox_vector_tile_error(async_client, mvt_setup): await async_client.search_mvt( index="museums", zoom=13, x=4207, y=2692, field="location", ) with pytest.raises(RequestError) as e: await async_client.search_mvt( index="museums", zoom=-100, x=4207, y=2692, field="location", ) assert str(e.value) == ( "BadRequestError(400, 'illegal_argument_exception', " "'Invalid geotile_grid precision of -100. Must be between 0 and 29.')" ) assert e.value.meta.status == 400 assert e.value.status_code == 400 assert e.value.body == { "error": { "root_cause": [ { "type": "illegal_argument_exception", "reason": "Invalid geotile_grid precision of -100. Must be between 0 and 29.", } ], "type": "illegal_argument_exception", "reason": "Invalid geotile_grid precision of -100. Must be between 0 and 29.", }, "status": 400, } async def test_mapbox_vector_tile_response(async_client, mvt_setup): try: import mapbox_vector_tile except ImportError: return pytest.skip("Requires the 'mapbox-vector-tile' package") resp = await async_client.search_mvt( index="museums", zoom=13, x=4207, y=2692, field="location", body={ "grid_precision": 2, "fields": ["name", "price"], "query": {"term": {"included": True}}, "aggs": { "min_price": {"min": {"field": "price"}}, "max_price": {"max": {"field": "price"}}, "avg_price": {"avg": {"field": "price"}}, }, }, ) assert resp.meta.status == 200 assert isinstance(resp.body, bytes) # Decode the binary as MVT tile = mapbox_vector_tile.decode(resp.body) # Assert some general things about the structure, mostly we want # to know that we got back a valid MVT. assert set(tile.keys()) == {"hits", "aggs", "meta"} python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_server/test_rest_api_spec.py000066400000000000000000000217051506101734100325010ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. """ Dynamically generated set of TestCases based on set of yaml files describing some integration tests. These files are shared among all official Elasticsearch clients. """ import inspect import json import warnings import pytest import pytest_asyncio from elasticsearch import ElasticsearchWarning, RequestError from ...test_server.test_rest_api_spec import ( API_PARAMS_RENAMES, IMPLEMENTED_FEATURES, PARAMS_RENAMES, RUN_ASYNC_REST_API_TESTS, YAML_TEST_SPECS, YamlRunner, ) from ...utils import parse_version pytestmark = pytest.mark.asyncio XPACK_FEATURES = None ES_VERSION = None async def await_if_coro(x): if inspect.iscoroutine(x): return await x return x class AsyncYamlRunner(YamlRunner): async def setup(self): # Pull skips from individual tests to not do unnecessary setup. skip_code = [] for action in self._run_code: assert len(action) == 1 action_type, _ = list(action.items())[0] if action_type == "skip": skip_code.append(action) else: break if self._setup_code or skip_code: self.section("setup") if skip_code: await self.run_code(skip_code) if self._setup_code: await self.run_code(self._setup_code) async def teardown(self): if self._teardown_code: self.section("teardown") await self.run_code(self._teardown_code) async def es_version(self): global ES_VERSION if ES_VERSION is None: version_string = (await self.client.info())["version"]["number"] if "." not in version_string: return () version = version_string.strip().split(".") ES_VERSION = tuple(int(v) if v.isdigit() else 999 for v in version) return ES_VERSION def section(self, name): print(("=" * 10) + " " + name + " " + ("=" * 10)) async def run(self): try: await self.setup() self.section("test") await self.run_code(self._run_code) finally: try: await self.teardown() except Exception: pass async def run_code(self, test): """Execute an instruction based on it's type.""" for action in test: assert len(action) == 1 action_type, action = list(action.items())[0] print(action_type, action) if hasattr(self, "run_" + action_type): await await_if_coro(getattr(self, "run_" + action_type)(action)) else: raise RuntimeError(f"Invalid action type {action_type!r}") async def run_do(self, action): api = self.client headers = action.pop("headers", None) catch = action.pop("catch", None) warn = action.pop("warnings", ()) allowed_warnings = action.pop("allowed_warnings", ()) assert len(action) == 1 # Remove the x_pack_rest_user authentication # if it's given via headers. We're already authenticated # via the 'elastic' user. if ( headers and headers.get("Authorization", None) == "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" ): headers.pop("Authorization") method, args = list(action.items())[0] if headers: args["headers"] = headers # locate api endpoint for m in method.split("."): if not hasattr(api, m): pytest.skip("This API isn't implemented yet") api = getattr(api, m) # Sometimes the 'body' parameter is encoded as a string instead of raw. if "body" in args: try: args["body"] = json.loads(args["body"]) except (TypeError, ValueError): pass if isinstance(args["body"], dict): # Detect when there are duplicate options that aren't the same value. # In this case the test isn't testing the client, it's testing Elasticsearch # and its ability to reject multiple values so we either combine # like values or skip the test entirely as unnecessary for the client. 
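# For example (illustrative values only, not taken from a real spec):
# args == {"index": "i", "body": {"index": "i"}} collapses to
# args == {"index": "i", "body": {}}, whereas
# args == {"index": "i", "body": {"index": "other"}} skips the test.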
duplicate_args = set(args["body"]).intersection(args) if duplicate_args: for arg in list(duplicate_args): if args["body"][arg] == args[arg]: args["body"].pop(arg) else: pytest.skip( "Contains a duplicate parameter with a different value" ) # some parameters had to be renamed to not clash with python builtins, # compensate renames = PARAMS_RENAMES.copy() renames.update(API_PARAMS_RENAMES.get(method, {})) for k in renames: if k in args: args[renames[k]] = args.pop(k) # resolve vars for k in args: args[k] = self._resolve(args[k]) warnings.simplefilter("always", category=ElasticsearchWarning) with warnings.catch_warnings(record=True) as caught_warnings: try: self.last_response = (await api(**args)).body except Exception as e: self._skip_intentional_type_errors(e) if not catch: raise self.run_catch(catch, e) else: if catch: raise AssertionError( f"Failed to catch {catch!r} in {self.last_response!r}." ) # Filter out warnings raised by other components. caught_warnings = [ str(w.message) for w in caught_warnings if w.category == ElasticsearchWarning and str(w.message) not in allowed_warnings ] # Sorting removes the issue with order raised. We only care about # if all warnings are raised in the single API call. if warn and sorted(warn) != sorted(caught_warnings): raise AssertionError( "Expected warnings not equal to actual warnings: expected=%r actual=%r" % (warn, caught_warnings) ) async def run_skip(self, skip): if "features" in skip: features = skip["features"] if not isinstance(features, (tuple, list)): features = [features] for feature in features: if feature in IMPLEMENTED_FEATURES: continue pytest.skip(f"feature '{feature}' is not supported") if "version" in skip: version, reason = skip["version"], skip["reason"] if version == "all": pytest.skip(reason) min_version, _, max_version = version.partition("-") min_version = parse_version(min_version.strip()) or (0,) max_version = parse_version(max_version.strip()) or (999,) if min_version <= (await self.es_version()) <= max_version: pytest.skip(reason) async def _feature_enabled(self, name): global XPACK_FEATURES if XPACK_FEATURES is None: try: xinfo = await self.client.xpack.info() XPACK_FEATURES = { f for f in xinfo["features"] if xinfo["features"][f]["enabled"] } IMPLEMENTED_FEATURES.add("xpack") except RequestError: XPACK_FEATURES = set() IMPLEMENTED_FEATURES.add("no_xpack") return name in XPACK_FEATURES @pytest_asyncio.fixture(scope="function") def async_runner(async_client_factory): return AsyncYamlRunner(async_client_factory) if RUN_ASYNC_REST_API_TESTS: @pytest.mark.parametrize("test_spec", YAML_TEST_SPECS) async def test_rest_api_spec(test_spec, async_runner): if test_spec.get("fail", False): pytest.xfail("Manually marked as failing in 'FAILING_TESTS'") elif test_spec.get("skip", False): pytest.xfail("Manually skipped") async_runner.use_spec(test_spec) await async_runner.run() python-elasticsearch-9.1.1/test_elasticsearch/test_async/test_transport.py000066400000000000000000000620361506101734100273520ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import asyncio import re import warnings from typing import Any, Dict, Optional import pytest from elastic_transport import ( ApiResponseMeta, BaseAsyncNode, HttpHeaders, NodeConfig, NodePool, ) from elastic_transport._node import NodeApiResponse from elastic_transport.client_utils import DEFAULT from elasticsearch import AsyncElasticsearch from elasticsearch.exceptions import ( ApiError, ConnectionError, ElasticsearchWarning, UnsupportedProductError, ) pytestmark = pytest.mark.asyncio class DummyNode(BaseAsyncNode): def __init__(self, config: NodeConfig): self.resp_status = config._extras.pop("status", 200) self.resp_error = config._extras.pop("exception", None) self.resp_data = config._extras.pop("data", b"{}") self.resp_headers = config._extras.pop( "headers", {"X-elastic-product": "Elasticsearch"} ) self.calls = [] self.closed = False super().__init__(config) async def perform_request(self, *args, **kwargs): self.calls.append((args, kwargs)) if self.resp_error: raise self.resp_error return NodeApiResponse( ApiResponseMeta( status=self.resp_status, headers=HttpHeaders(self.resp_headers), http_version="1.1", duration=0.0, node=self.config, ), self.resp_data, ) async def close(self): if self.closed: raise RuntimeError("This connection is already closed") self.closed = True class NoTimeoutConnectionPool(NodePool): def mark_dead(self, connection): pass def mark_live(self, connection): pass CLUSTER_NODES = """{ "_nodes" : { "total" : 1, "successful" : 1, "failed" : 0 }, "cluster_name" : "elasticsearch", "nodes" : { "SRZpKFZdQguhhvifmN6UVA" : { "name" : "SRZpKFZ", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : "253032b", "roles" : [ "master", "data", "ingest" ], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "1.1.1.1:123", "max_content_length_in_bytes" : 104857600 } } } }""" CLUSTER_NODES_7x_PUBLISH_HOST = """{ "_nodes" : { "total" : 1, "successful" : 1, "failed" : 0 }, "cluster_name" : "elasticsearch", "nodes" : { "SRZpKFZdQguhhvifmN6UVA" : { "name" : "SRZpKFZ", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : "253032b", "roles" : [ "master", "data", "ingest" ], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "somehost.tld/1.1.1.1:123", "max_content_length_in_bytes" : 104857600 } } } }""" CLUSTER_NODES_MASTER_ONLY = """{ "_nodes" : { "total" : 2, "successful" : 2, "failed" : 0 }, "cluster_name" : "elasticsearch", "nodes" : { "SRZpKFZdQguhhvifmN6UVA" : { "name" : "SRZpKFZa", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : "253032b", "roles" : ["master"], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "somehost.tld/1.1.1.1:123", "max_content_length_in_bytes" : 104857600 } }, "SRZpKFZdQguhhvifmN6UVB" : { "name" : "SRZpKFZb", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : 
"5.0.0", "build_hash" : "253032b", "roles" : [ "master", "data", "ingest" ], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "somehost.tld/1.1.1.1:124", "max_content_length_in_bytes" : 104857600 } } } }""" class TestTransport: async def test_request_timeout_extracted_from_params_and_passed(self): client = AsyncElasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode ) await client.info(params={"request_timeout": 42}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["request_timeout"] == 42 async def test_opaque_id(self): client = AsyncElasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode, opaque_id="app-1", ) await client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["headers"]["x-opaque-id"] == "app-1" # Now try with an 'x-opaque-id' set on perform_request(). await client.info(opaque_id="request-2") calls = client.transport.node_pool.get().calls assert 2 == len(calls) assert calls[1][0] == ("GET", "/") assert calls[1][1]["headers"]["x-opaque-id"] == "request-2" async def test_request_with_custom_user_agent_header(self): client = AsyncElasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode ) await client.info(headers={"User-Agent": "my-custom-value/1.2.3"}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["headers"]["user-agent"] == "my-custom-value/1.2.3" async def test_client_meta_header(self): client = AsyncElasticsearch("http://localhost:9200", node_class=DummyNode) await client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) headers = calls[0][1]["headers"] assert re.search( r"^es=[0-9.]+p?,py=[0-9.]+p?,t=[0-9.]+p?$", headers["x-elastic-client-meta"] ) class DummyNodeWithMeta(DummyNode): _CLIENT_META_HTTP_CLIENT = ("dm", "1.2.3") client = AsyncElasticsearch( "http://localhost:9200", node_class=DummyNodeWithMeta ) await client.info(headers={"CustoM": "header"}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) headers = calls[0][1]["headers"] assert re.search( r"^es=[0-9.]+p?,py=[0-9.]+p?,t=[0-9.]+p?,dm=1.2.3$", headers["x-elastic-client-meta"], ) assert headers["Custom"] == "header" async def test_client_meta_header_not_sent(self): client = AsyncElasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode ) await client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][1]["headers"] == { "accept": "application/vnd.elasticsearch+json; compatible-with=9", } async def test_body_surrogates_replaced_encoded_into_bytes(self): client = AsyncElasticsearch("http://localhost:9200", node_class=DummyNode) await client.search(query={"match": "你好\uda6a"}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert ( calls[0][1]["body"] == b'{"query":{"match":"\xe4\xbd\xa0\xe5\xa5\xbd\xed\xa9\xaa"}}' ) def test_kwargs_passed_on_to_node_pool(self): dt = object() client = AsyncElasticsearch( "http://localhost:9200", dead_node_backoff_factor=dt ) assert dt is client.transport.node_pool.dead_node_backoff_factor class MyConnection: def __init__(self, *_, **__): pass async def perform_request(*_, **__): pass client = AsyncElasticsearch("http://localhost:9200", node_class=MyConnection) assert 1 == 
len(client.transport.node_pool) assert isinstance(client.transport.node_pool.all()[0], MyConnection) async def test_request_will_fail_after_x_retries(self): client = AsyncElasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"exception": ConnectionError("abandon ship!")}, ) ], node_class=DummyNode, ) with pytest.raises(ConnectionError) as e: await client.info() calls = client.transport.node_pool.get().calls assert 4 == len(calls) assert len(e.value.errors) == 3 del calls[:] with pytest.raises(ConnectionError): await client.options(max_retries=5).info() calls = client.transport.node_pool.get().calls assert 6 == len(calls) async def test_failed_connection_will_be_marked_as_dead(self): client = AsyncElasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"exception": ConnectionError("abandon ship!")}, ), NodeConfig( "http", "localhost", 9201, _extras={"exception": ConnectionError("abandon ship!")}, ), ], node_class=DummyNode, ) with pytest.raises(ConnectionError): await client.info() assert 0 == len(client.transport.node_pool._alive_nodes) async def test_resurrected_connection_will_be_marked_as_live_on_success(self): client = AsyncElasticsearch( [ NodeConfig("http", "localhost", 9200), NodeConfig("http", "localhost", 9201), ], node_class=DummyNode, ) node1 = client.transport.node_pool.get() node2 = client.transport.node_pool.get() assert node1 is not node2 client.transport.node_pool.mark_dead(node1) client.transport.node_pool.mark_dead(node2) assert len(client.transport.node_pool._alive_nodes) == 0 await client.info() assert len(client.transport.node_pool._alive_nodes) == 1 assert len(client.transport.node_pool._dead_consecutive_failures) == 1 async def test_override_mark_dead_mark_live(self): client = AsyncElasticsearch( [ NodeConfig("http", "localhost", 9200), NodeConfig("http", "localhost", 9201), ], node_class=DummyNode, node_pool_class=NoTimeoutConnectionPool, ) node1 = client.transport.node_pool.get() node2 = client.transport.node_pool.get() assert node1 is not node2 client.transport.node_pool.mark_dead(node1) client.transport.node_pool.mark_dead(node2) assert len(client.transport.node_pool._alive_nodes) == 2 await client.info() assert len(client.transport.node_pool._alive_nodes) == 2 assert len(client.transport.node_pool._dead_consecutive_failures) == 0 @pytest.mark.parametrize( ["nodes_info_response", "node_host"], [(CLUSTER_NODES, "1.1.1.1"), (CLUSTER_NODES_7x_PUBLISH_HOST, "somehost.tld")], ) async def test_sniff_will_use_seed_connections( self, nodes_info_response, node_host ): client = AsyncElasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"data": nodes_info_response} ) ], node_class=DummyNode, sniff_on_start=True, ) # Async sniffing happens in the background. await client.transport._async_call() assert client.transport._sniffing_task is not None await client.transport._sniffing_task node_configs = [node.config for node in client.transport.node_pool.all()] assert len(node_configs) == 2 assert NodeConfig("http", node_host, 123) in node_configs async def test_sniff_on_start_ignores_sniff_timeout(self): client = AsyncElasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_on_start=True, sniff_timeout=12, meta_header=False, ) # Async sniffing happens in the background. 
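# `transport._async_call()` is the transport's lazy-initialization hook:
# calling it here schedules the sniff as a background asyncio task without
# issuing an API request, and awaiting `_sniffing_task` afterwards ensures
# the node list has been refreshed before the assertions run. Both names
# are private elastic_transport internals, so this check is deliberately
# white-box.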
await client.transport._async_call() assert client.transport._sniffing_task is not None await client.transport._sniffing_task calls = client.transport.node_pool.all()[0].calls assert len(calls) == 1 assert calls[0] == ( ("GET", "/_nodes/_all/http"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9" }, "request_timeout": None, # <-- Should be None instead of 12 }, ) async def test_sniff_uses_sniff_timeout(self): client = AsyncElasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_before_requests=True, sniff_timeout=12, meta_header=False, ) await client.info() # Async sniffing happens in the background. assert client.transport._sniffing_task is not None await client.transport._sniffing_task calls = client.transport.node_pool.all()[0].calls assert len(calls) == 2 assert calls[0] == ( ("GET", "/"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9", }, "request_timeout": DEFAULT, }, ) assert calls[1] == ( ("GET", "/_nodes/_all/http"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9" }, "request_timeout": 12, }, ) async def test_sniff_on_start_awaits_before_request(self): client = AsyncElasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_on_start=True, sniff_timeout=12, meta_header=False, ) await client.info() calls = client.transport.node_pool.all()[0].calls assert len(calls) == 2 # The sniff request happens first. assert calls[0][0] == ("GET", "/_nodes/_all/http") assert calls[1][0] == ("GET", "/") async def test_sniff_reuses_node_instances(self): client = AsyncElasticsearch( [NodeConfig("http", "1.1.1.1", 123, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_on_start=True, ) assert len(client.transport.node_pool) == 1 await client.info() assert len(client.transport.node_pool) == 1 @pytest.mark.parametrize( ["extra_key", "extra_value"], [("exception", ConnectionError("Abandon ship!")), ("status", 500)], ) async def test_sniff_on_node_failure_triggers(self, extra_key, extra_value): client = AsyncElasticsearch( [ NodeConfig("http", "localhost", 9200, _extras={extra_key: extra_value}), NodeConfig("http", "localhost", 9201, _extras={"data": CLUSTER_NODES}), ], node_class=DummyNode, sniff_on_node_failure=True, randomize_nodes_in_pool=False, max_retries=0, ) request_failed_in_error = False try: await client.info() except (ConnectionError, ApiError): request_failed_in_error = True assert client.transport._sniffing_task is not None await client.transport._sniffing_task assert request_failed_in_error assert len(client.transport.node_pool) == 3 async def test_sniff_after_n_seconds(self): event_loop = asyncio.get_running_loop() client = AsyncElasticsearch( # noqa: F821 [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, min_delay_between_sniffing=5, ) client.transport._last_sniffed_at = event_loop.time() await client.info() for _ in range(4): await client.info() await asyncio.sleep(0) assert 1 == len(client.transport.node_pool) client.transport._last_sniffed_at = event_loop.time() - 5.1 await client.info() await client.transport._sniffing_task # Need to wait for the sniffing task to complete assert 2 == len(client.transport.node_pool) assert "http://1.1.1.1:123" in ( node.base_url for node in client.transport.node_pool.all() ) assert ( event_loop.time() - 1 < 
client.transport._last_sniffed_at < event_loop.time() + 0.01 ) @pytest.mark.parametrize( "kwargs", [ {"sniff_on_start": True}, {"sniff_on_node_failure": True}, {"sniff_before_requests": True}, {"sniff_timeout": 1}, ], ) async def test_sniffing_disabled_on_elastic_cloud(self, kwargs): with pytest.raises(ValueError) as e: AsyncElasticsearch( cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", **kwargs, ) assert ( str(e.value) == "Sniffing should not be enabled when connecting to Elastic Cloud" ) async def test_sniff_on_start_close_unlocks_async_calls(self): event_loop = asyncio.get_running_loop() client = AsyncElasticsearch( # noqa: F821 [ NodeConfig( "http", "localhost", 9200, _extras={"delay": 10, "data": CLUSTER_NODES}, ), ], node_class=DummyNode, sniff_on_start=True, ) # Start making _async_calls() before we cancel tasks = [] start_time = event_loop.time() for _ in range(3): tasks.append(event_loop.create_task(client.info())) await asyncio.sleep(0) # Close the transport while the sniffing task is active! :( await client.transport.close() # Now we start waiting on all those _async_calls() await asyncio.gather(*tasks) end_time = event_loop.time() duration = end_time - start_time # A lot quicker than 10 seconds defined in 'delay' assert duration < 1 async def test_sniffing_master_only_filtered_by_default(self): client = AsyncElasticsearch( # noqa: F821 [ NodeConfig( "http", "localhost", 9200, _extras={"data": CLUSTER_NODES_MASTER_ONLY}, ) ], node_class=DummyNode, sniff_on_start=True, ) await client.transport._async_call() assert len(client.transport.node_pool) == 2 async def test_sniff_node_callback(self): def sniffed_node_callback( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: return ( node_config if node_info["http"]["publish_address"].endswith(":124") else None ) client = AsyncElasticsearch( # noqa: F821 [ NodeConfig( "http", "localhost", 9200, _extras={"data": CLUSTER_NODES_MASTER_ONLY}, ) ], node_class=DummyNode, sniff_on_start=True, sniffed_node_callback=sniffed_node_callback, ) await client.transport._async_call() assert len(client.transport.node_pool) == 2 ports = {node.config.port for node in client.transport.node_pool.all()} assert ports == {9200, 124} @pytest.mark.parametrize("headers", [{}, {"X-elastic-product": "BAD HEADER"}]) async def test_unsupported_product_error(headers): client = AsyncElasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"headers": headers})], meta_header=False, node_class=DummyNode, ) with pytest.raises(UnsupportedProductError) as e: await client.info() assert str(e.value) == ( "The client noticed that the server is not Elasticsearch " "and we do not support this unknown product" ) calls = client.transport.node_pool.get().calls assert len(calls) == 1 assert calls[0] == ( ("GET", "/"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9", }, "request_timeout": DEFAULT, }, ) @pytest.mark.parametrize("status", [401, 403, 413, 500]) async def test_unsupported_product_error_not_raised_on_non_2xx(status): client = AsyncElasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"headers": {}, "status": status} ) ], meta_header=False, node_class=DummyNode, ) try: await client.info() except UnsupportedProductError: assert False, "Raised UnsupportedProductError" except ApiError as e: assert e.meta.status == status @pytest.mark.parametrize("status", [404, 500]) async def 
test_api_error_raised_before_product_error(status): client = AsyncElasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={ "headers": {"X-elastic-product": "BAD HEADER"}, "status": status, }, ) ], meta_header=False, node_class=DummyNode, ) with pytest.raises(ApiError) as e: await client.info() assert e.value.status_code == status assert not isinstance(e.value, UnsupportedProductError) calls = client.transport.node_pool.get().calls assert len(calls) == 1 assert calls[0][0] == ("GET", "/") @pytest.mark.parametrize( "headers", [ { "Warning": '299 Elasticsearch-8.0.0-SNAPSHOT-ad975cacd240b3329e160673c432e768dcd7899a "[xpack.monitoring.history.duration] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."', "X-elastic-product": "Elasticsearch", }, { "Warning": '299 Elasticsearch-8.0.0-SNAPSHOT-ad975cacd240b3329e160673c432e768dcd7899a "[xpack.monitoring.history.duration] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.", 299 Elasticsearch-8.0.0-SNAPSHOT-ad975cacd240b3329e160673c432e768dcd7899a "[xpack.monitoring.history.duration2] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."', "X-elastic-product": "Elasticsearch", }, ], ) async def test_warning_header(headers): client = AsyncElasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"headers": headers})], meta_header=False, node_class=DummyNode, ) with warnings.catch_warnings(record=True) as w: await client.info() assert len(w) == headers["Warning"].count("299") assert w[0].category == ElasticsearchWarning assert ( str(w[0].message) == "[xpack.monitoring.history.duration] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." ) python-elasticsearch-9.1.1/test_elasticsearch/test_cases.py000066400000000000000000000056101506101734100242330ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
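# The dummy transports below let client tests assert on the exact HTTP
# method, path, and options of each request without a live cluster: every
# `perform_request` is recorded in `self.calls` keyed by (method, target),
# and a canned ApiResponseMeta carrying the "X-elastic-product" header is
# returned so product verification passes. A minimal usage sketch
# (hypothetical test, not part of this module):
#
#     client = Elasticsearch("http://localhost:9200", transport_class=DummyTransport)
#     client.indices.create(index="test-index")
#     assert ("PUT", "/test-index") in client.transport.calls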
from collections import defaultdict from elastic_transport import ApiResponseMeta, HttpHeaders from elasticsearch import Elasticsearch class DummyTransport: def __init__(self, hosts, responses=None, **_): self.hosts = hosts self.responses = responses self.call_count = 0 self.calls = defaultdict(list) def perform_request(self, method, target, **kwargs): status, resp = 200, {} if self.responses: status, resp = self.responses[self.call_count] self.call_count += 1 self.calls[(method, target)].append(kwargs) return ( ApiResponseMeta( status=status, http_version="1.1", headers=HttpHeaders({"X-elastic-product": "Elasticsearch"}), duration=0.0, node=None, ), resp, ) class DummyAsyncTransport: def __init__(self, hosts, responses=None, **_): self.hosts = hosts self.responses = responses self.call_count = 0 self.calls = defaultdict(list) async def perform_request(self, method, target, **kwargs): status, resp = 200, {} if self.responses: status, resp = self.responses[self.call_count] self.call_count += 1 self.calls[(method, target)].append(kwargs) return ( ApiResponseMeta( status=status, http_version="1.1", headers=HttpHeaders({"X-elastic-product": "Elasticsearch"}), duration=0.0, node=None, ), resp, ) class DummyTransportTestCase: def setup_method(self, _): self.client = Elasticsearch( "http://localhost:9200", transport_class=DummyTransport ) def assert_call_count_equals(self, count): assert count == self.client.transport.call_count def assert_url_called(self, method, url, count=1): assert (method, url) in self.client.transport.calls calls = self.client.transport.calls[(method, url)] assert count == len(calls) return calls python-elasticsearch-9.1.1/test_elasticsearch/test_client/000077500000000000000000000000001506101734100240375ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_client/__init__.py000066400000000000000000000014231506101734100261500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_cluster.py000066400000000000000000000035331506101734100271350ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. from test_elasticsearch.test_cases import DummyTransportTestCase class TestCluster(DummyTransportTestCase): def test_stats_without_node_id(self): self.client.cluster.stats() self.assert_url_called("GET", "/_cluster/stats") def test_stats_with_node_id(self): self.client.cluster.stats(node_id="node-1") self.assert_url_called("GET", "/_cluster/stats/nodes/node-1") self.client.cluster.stats(node_id="node-2") self.assert_url_called("GET", "/_cluster/stats/nodes/node-2") def test_state_with_index_without_metric_defaults_to_all(self): self.client.cluster.state() self.assert_url_called("GET", "/_cluster/state") self.client.cluster.state(metric="cluster_name") self.assert_url_called("GET", "/_cluster/state/cluster_name") self.client.cluster.state(index="index-1") self.assert_url_called("GET", "/_cluster/state/_all/index-1") self.client.cluster.state(index="index-1", metric="cluster_name") self.assert_url_called("GET", "/_cluster/state/cluster_name/index-1") python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_deprecated_options.py000066400000000000000000000032421506101734100313240ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import warnings import pytest from elasticsearch import Elasticsearch def test_http_auth(): with warnings.catch_warnings(record=True) as w: client = Elasticsearch( "http://localhost:9200", http_auth=("username", "password") ) assert len(w) == 1 assert w[0].category == DeprecationWarning assert ( str(w[0].message) == "The 'http_auth' parameter is deprecated. Use 'basic_auth' or 'bearer_auth' parameters instead" ) assert client._headers["Authorization"] == "Basic dXNlcm5hbWU6cGFzc3dvcmQ=" with pytest.raises(ValueError) as e: Elasticsearch( "http://localhost:9200", http_auth=("username", "password"), basic_auth=("username", "password"), ) assert ( str(e.value) == "Can't specify both 'http_auth' and 'basic_auth', instead only specify 'basic_auth'" ) python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_indices.py000066400000000000000000000037611506101734100270750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from test_elasticsearch.test_cases import DummyTransportTestCase class TestIndices(DummyTransportTestCase): def test_create_one_index(self): self.client.indices.create(index="test-index") self.assert_url_called("PUT", "/test-index") def test_delete_multiple_indices(self): self.client.indices.delete(index=["test-index", "second.index", "third/index"]) self.assert_url_called("DELETE", "/test-index,second.index,third%2Findex") def test_exists_index(self): self.client.indices.exists(index="second.index,third/index") self.assert_url_called("HEAD", "/second.index,third%2Findex") def test_passing_empty_value_for_required_param_raises_exception(self): with pytest.raises(ValueError): self.client.indices.exists(index=None) with pytest.raises(ValueError): self.client.indices.exists(index=[]) with pytest.raises(ValueError): self.client.indices.exists(index="") def test_query_params(self): self.client.indices.delete( index=["test1", "test*"], expand_wildcards=["open", "closed"] ) self.assert_url_called("DELETE", "/test1,test*?expand_wildcards=open,closed") python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_options.py000066400000000000000000000451711506101734100271530ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elastic_transport import OpenTelemetrySpan from elastic_transport.client_utils import DEFAULT from elasticsearch import AsyncElasticsearch, Elasticsearch, JsonSerializer from elasticsearch._sync.client.utils import USER_AGENT from test_elasticsearch.test_cases import ( DummyAsyncTransport, DummyTransport, DummyTransportTestCase, ) EXPECTED_SERIALIZERS = { "application/vnd.mapbox-vector-tile", "application/x-ndjson", "application/json", "text/*", "application/vnd.elasticsearch+json", "application/vnd.elasticsearch+x-ndjson", } try: import pyarrow as pa EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream") except ImportError: pa = None class TestOptions(DummyTransportTestCase): def assert_called_with_headers(self, client, method, target, headers): calls = client.transport.calls assert (method, target) in calls called_headers = calls[(method, target)][-1]["headers"].copy() for header in ( "accept", "content-type", ): # Common HTTP headers that we're not testing. 
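# The client always injects its own Accept/Content-Type values, so they
# are stripped here to leave only the headers a test case set explicitly.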

python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_options.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest
from elastic_transport import OpenTelemetrySpan
from elastic_transport.client_utils import DEFAULT

from elasticsearch import AsyncElasticsearch, Elasticsearch, JsonSerializer
from elasticsearch._sync.client.utils import USER_AGENT
from test_elasticsearch.test_cases import (
    DummyAsyncTransport,
    DummyTransport,
    DummyTransportTestCase,
)

EXPECTED_SERIALIZERS = {
    "application/vnd.mapbox-vector-tile",
    "application/x-ndjson",
    "application/json",
    "text/*",
    "application/vnd.elasticsearch+json",
    "application/vnd.elasticsearch+x-ndjson",
}

try:
    import pyarrow as pa

    EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream")
except ImportError:
    pa = None


class TestOptions(DummyTransportTestCase):
    def assert_called_with_headers(self, client, method, target, headers):
        calls = client.transport.calls
        assert (method, target) in calls
        called_headers = calls[(method, target)][-1]["headers"].copy()
        for header in (
            "accept",
            "content-type",
        ):  # Common HTTP headers that we're not testing.
            called_headers.pop(header, None)
        assert headers == called_headers

    @pytest.mark.parametrize(
        ["options", "headers"],
        [
            (
                {"headers": {"authorization": "custom method"}},
                {"Authorization": "custom method"},
            ),
            ({"api_key": "key"}, {"Authorization": "ApiKey key"}),
            ({"api_key": ("id", "value")}, {"Authorization": "ApiKey aWQ6dmFsdWU="}),
            (
                {"basic_auth": ("username", "password")},
                {"Authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ="},
            ),
            ({"basic_auth": "encoded"}, {"Authorization": "Basic encoded"}),
            ({"bearer_auth": "bear"}, {"Authorization": "Bearer bear"}),
            (
                {"opaque_id": "test-id"},
                {"X-Opaque-Id": "test-id"},
            ),
            (
                {
                    "opaque_id": "opaq-id",
                    "headers": {"custom": "key"},
                    "api_key": ("id", "val"),
                },
                {
                    "custom": "key",
                    "authorization": "ApiKey aWQ6dmFs",
                    "x-opaque-id": "opaq-id",
                },
            ),
        ],
    )
    def test_options_to_headers(self, options, headers):
        # Tests that authentication works identically from the constructor
        # or from the client.options() API.
        client = self.client.options(**options)
        client.indices.exists(index="test")
        self.assert_called_with_headers(client, "HEAD", "/test", headers=headers)

        client = Elasticsearch(
            "http://localhost:9200", transport_class=DummyTransport, **options
        )
        client.indices.exists(index="test")
        self.assert_called_with_headers(client, "HEAD", "/test", headers=headers)

        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            headers={"Authorization": "not it"},
        )
        client = self.client.options(**options)
        client.indices.exists(index="test")
        self.assert_called_with_headers(client, "HEAD", "/test", headers=headers)

    @pytest.mark.parametrize("api_key", [None, "api-key", ("api", "key")])
    @pytest.mark.parametrize("bearer_auth", [None, "bearer"])
    @pytest.mark.parametrize("basic_auth", [None, "user:pass", ("user", "pass")])
    @pytest.mark.parametrize(
        "headers", [None, {"Authorization": "value"}, {"authorization": "value"}]
    )
    def test_options_auth_conflicts(self, api_key, bearer_auth, basic_auth, headers):
        if sum(x is not None for x in (api_key, bearer_auth, basic_auth, headers)) < 2:
            pytest.skip("Skip the cases where 1 or fewer options are set")
        kwargs = {
            k: v
            for k, v in {
                "api_key": api_key,
                "bearer_auth": bearer_auth,
                "basic_auth": basic_auth,
                "headers": headers,
            }.items()
            if v is not None
        }

        with pytest.raises(ValueError) as e:
            self.client.options(**kwargs)
        assert str(e.value) in (
            "Can only set one of 'api_key', 'basic_auth', and 'bearer_auth'",
            "Can't set 'Authorization' HTTP header with other authentication options",
        )
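
# A hedged sketch (not part of the test suite) of the per-request credential
# pattern exercised above: .options() returns a copy of the client that
# shares the underlying transport, so one client instance can issue requests
# under several identities. Values below are placeholders.
#
# from elasticsearch import Elasticsearch
#
# client = Elasticsearch("http://localhost:9200", basic_auth=("admin", "pass"))
# # Same connection pool, different Authorization header for this call chain:
# scoped = client.options(api_key="base64-encoded-api-key")
# scoped.indices.exists(index="my-index")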
    def test_options_passed_to_perform_request(self):
        # Default transport options are 'DEFAULT' to rely on 'elastic_transport' defaults.
        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
        )
        client.indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("request_timeout") is DEFAULT
        assert call.pop("max_retries") is DEFAULT
        assert call.pop("retry_on_timeout") is DEFAULT
        assert call.pop("retry_on_status") is DEFAULT
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
        }

        # Can be overwritten with .options()
        client.options(
            request_timeout=1,
            max_retries=2,
            retry_on_timeout=False,
            retry_on_status=(404,),
        ).indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][1]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 1,
            "max_retries": 2,
            "retry_on_status": (404,),
            "retry_on_timeout": False,
        }

        # Can be overwritten on constructor
        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            request_timeout=1,
            max_retries=2,
            retry_on_timeout=False,
            retry_on_status=(404,),
        )
        client.indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 1,
            "max_retries": 2,
            "retry_on_status": (404,),
            "retry_on_timeout": False,
        }

    @pytest.mark.asyncio
    async def test_options_passed_to_async_perform_request(self):
        # Default transport options are 'DEFAULT' to rely on 'elastic_transport' defaults.
        client = AsyncElasticsearch(
            "http://localhost:9200",
            transport_class=DummyAsyncTransport,
        )
        await client.indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("request_timeout") is DEFAULT
        assert call.pop("max_retries") is DEFAULT
        assert call.pop("retry_on_timeout") is DEFAULT
        assert call.pop("retry_on_status") is DEFAULT
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
        }

        # Can be overwritten with .options()
        await client.options(
            request_timeout=1,
            max_retries=2,
            retry_on_timeout=False,
            retry_on_status=(404,),
        ).indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][1]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 1,
            "max_retries": 2,
            "retry_on_status": (404,),
            "retry_on_timeout": False,
        }

        # Can be overwritten on constructor
        client = AsyncElasticsearch(
            "http://localhost:9200",
            transport_class=DummyAsyncTransport,
            request_timeout=1,
            max_retries=2,
            retry_on_timeout=False,
            retry_on_status=(404,),
        )
        await client.indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 1,
            "max_retries": 2,
            "retry_on_status": (404,),
            "retry_on_timeout": False,
        }

    def test_default_node_configs(self):
        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            headers={"key": "val"},
            basic_auth=("username", "password"),
        )

        assert client._headers == {
            "key": "val",
            "authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
        }

        assert len(client.transport.hosts) == 1
        node_config = client.transport.hosts[0]
        assert node_config.scheme == "http"
        assert node_config.host == "localhost"
        assert node_config.port == 9200
        assert node_config.path_prefix == ""
        assert node_config.headers == {"user-agent": USER_AGENT}

    def test_http_headers_overrides(self):
        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            headers={"key": "val"},
        )
        calls = client.transport.calls

        client.indices.get(index="1")
        call = calls[("GET", "/1")][0]
        assert call["headers"] == {
            "key": "val",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        client.options(headers={"key1": "val"}).indices.get(index="2")
        call = calls[("GET", "/2")][0]
        assert call["headers"] == {
            "key": "val",
            "key1": "val",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        client.options(headers={"key": "val2"}).indices.get(index="3")
        call = calls[("GET", "/3")][0]
        assert call["headers"] == {
            "key": "val2",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        client = Elasticsearch(
            "http://username:password@localhost:9200",
            transport_class=DummyTransport,
            headers={"key": "val"},
        )
        calls = client.transport.calls

        node_config = client.transport.hosts[0]
        assert node_config.headers == {
            "authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQ=",
            "user-agent": USER_AGENT,
        }
        assert client._headers == {"key": "val"}

    def test_user_agent_override(self):
        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
        )
        calls = client.transport.calls

        client.options(headers={"user-agent": "custom1"}).indices.get(index="1")
        call = calls[("GET", "/1")][0]
        assert call["headers"] == {
            "user-agent": "custom1",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        client.indices.get(index="2", headers={"user-agent": "custom2"})
        call = calls[("GET", "/2")][0]
        assert call["headers"] == {
            "user-agent": "custom2",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            headers={"User-Agent": "custom3"},
        )
        calls = client.transport.calls

        client.indices.get(index="1")
        call = calls[("GET", "/1")][0]
        assert call["headers"] == {
            "user-agent": "custom3",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        client.indices.get(index="2", headers={"user-agent": "custom4"})
        call = calls[("GET", "/2")][0]
        assert call["headers"] == {
            "user-agent": "custom4",
            "accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

    def test_options_timeout_parameters(self):
        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            request_timeout=1,
            max_retries=2,
            retry_on_status=(404,),
            retry_on_timeout=True,
        )

        # timeout parameters are preserved with .options()
        client.options().indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 1,
            "max_retries": 2,
            "retry_on_status": (404,),
            "retry_on_timeout": True,
        }

        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
            request_timeout=1,
            max_retries=2,
            retry_on_status=(404,),
            retry_on_timeout=True,
        )
        client.options(
            request_timeout=2,
            max_retries=3,
            retry_on_status=(400,),
            retry_on_timeout=False,
        ).indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 2,
            "max_retries": 3,
            "retry_on_status": (400,),
            "retry_on_timeout": False,
        }

        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
        )
        client.options().indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("request_timeout") is DEFAULT
        assert call.pop("max_retries") is DEFAULT
        assert call.pop("retry_on_timeout") is DEFAULT
        assert call.pop("retry_on_status") is DEFAULT
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
        }

        client = Elasticsearch(
            "http://localhost:9200",
            transport_class=DummyTransport,
        )
        client.options(
            request_timeout=1,
            max_retries=2,
            retry_on_status=(404,),
            retry_on_timeout=True,
        ).indices.get(index="test")

        calls = client.transport.calls
        call = calls[("GET", "/test")][0]
        assert call.pop("client_meta") is DEFAULT
        assert isinstance(call.pop("otel_span"), OpenTelemetrySpan)
        assert call == {
            "headers": {
                "accept": "application/vnd.elasticsearch+json; compatible-with=9",
            },
            "body": None,
            "request_timeout": 1,
            "max_retries": 2,
            "retry_on_status": (404,),
            "retry_on_timeout": True,
        }

    def test_serializer_and_serializers(self):
        with pytest.raises(ValueError) as e:
            Elasticsearch(
                "http://localhost:9200",
                serializer=JsonSerializer(),
                serializers={"application/json": JsonSerializer()},
            )
        assert str(e.value) == (
            "Can't specify both 'serializer' and 'serializers' parameters together. "
            "Instead only specify one of the other."
        )

        class CustomSerializer(JsonSerializer):
            pass

        client = Elasticsearch("http://localhost:9200", serializer=CustomSerializer())
        assert isinstance(
            client.transport.serializers.get_serializer("application/json"),
            CustomSerializer,
        )
        assert (
            set(client.transport.serializers.serializers.keys()) == EXPECTED_SERIALIZERS
        )

        client = Elasticsearch(
            "http://localhost:9200",
            serializers={
                "application/json": CustomSerializer(),
                "application/cbor": CustomSerializer(),
            },
        )
        assert isinstance(
            client.transport.serializers.get_serializer("application/json"),
            CustomSerializer,
        )
        expected = EXPECTED_SERIALIZERS | {"application/cbor"}
        assert set(client.transport.serializers.serializers.keys()) == expected


python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_overrides.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

from ..test_cases import DummyTransportTestCase


class TestOverriddenUrlTargets(DummyTransportTestCase):
    def test_create(self):
        self.client.create(index="test-index", id="test-id", body={})
        self.assert_url_called("PUT", "/test-index/_create/test-id")

    def test_delete(self):
        self.client.delete(index="test-index", id="test-id")
        self.assert_url_called("DELETE", "/test-index/_doc/test-id")

    def test_index(self):
        self.client.index(index="test-index", document={})
        self.assert_url_called("POST", "/test-index/_doc")

        self.client.index(index="test-index", id="test-id", document={})
        self.assert_url_called("PUT", "/test-index/_doc/test-id")

    def test_update(self):
        self.client.update(index="test-index", id="test-id", body={})
        self.assert_url_called("POST", "/test-index/_update/test-id")

    def test_cluster_state(self):
        self.client.cluster.state()
        self.assert_url_called("GET", "/_cluster/state")

        self.client.cluster.state(index="test-index")
        self.assert_url_called("GET", "/_cluster/state/_all/test-index")

        self.client.cluster.state(index="test-index", metric="test-metric")
        self.assert_url_called("GET", "/_cluster/state/test-metric/test-index")

    def test_cluster_stats(self):
        self.client.cluster.stats()
        self.assert_url_called("GET", "/_cluster/stats")

        self.client.cluster.stats(node_id="test-node")
        self.assert_url_called("GET", "/_cluster/stats/nodes/test-node")

    def test_index_uses_post_if_id_is_empty(self):
        self.client.index(index="my-index", id="", document={})
        self.assert_url_called("POST", "/my-index/_doc")

    def test_index_uses_put_if_id_is_not_empty(self):
        self.client.index(index="my-index", id=0, document={})
        self.assert_url_called("PUT", "/my-index/_doc/0")

    @pytest.mark.parametrize("param_name", ["from", "from_"])
    def test_from_in_search(self, param_name):
        self.client.search(index="i", **{param_name: 10})
        calls = self.assert_url_called("POST", "/i/_search")
        assert calls[0]["body"] == {"from": 10}

    def test_sort_in_search(self):
        self.client.search(index="i", sort="@timestamp:asc")
        calls = self.assert_url_called("POST", "/i/_search?sort=%40timestamp%3Aasc")
        assert calls[0]["body"] is None

        self.client.search(index="i", sort=["@timestamp:asc", "field"])
        calls = self.assert_url_called(
            "POST", "/i/_search?sort=%40timestamp%3Aasc,field"
        )
        assert calls[0]["body"] is None

        self.client.search(index="i", sort=("field", "@timestamp:asc"))
        calls = self.assert_url_called(
            "POST", "/i/_search?sort=field,%40timestamp%3Aasc"
        )
        assert calls[0]["body"] is None

        self.client.search(index="i", sort=("field", "@timestamp"))
        calls = self.assert_url_called("POST", "/i/_search")
        assert calls[-1]["body"] == {"sort": ("field", "@timestamp")}

        self.client.search(index="i2", sort=["@timestamp", "field"])
        calls = self.assert_url_called("POST", "/i2/_search")
        assert calls[-1]["body"] == {"sort": ["@timestamp", "field"]}

        self.client.search(
            index="i3", sort=("field", "@timestamp:asc", {"field": "desc"})
        )
        calls = self.assert_url_called("POST", "/i3/_search")
        assert calls[-1]["body"] == {
            "sort": ("field", "@timestamp:asc", {"field": "desc"})
        }

        self.client.search(index="i4", sort=["@timestamp:asc", {"field": "desc"}])
        calls = self.assert_url_called("POST", "/i4/_search")
        assert calls[-1]["body"] == {"sort": ["@timestamp:asc", {"field": "desc"}]}


python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_requests_auth.py
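
# A brief sketch (not from the test suite) of the sort behaviour pinned down
# by test_sort_in_search: string sorts like "field:asc" can travel as URL
# query parameters, while any sort containing a mapping must be serialized
# into the JSON body. The index name is a placeholder.
#
# from elasticsearch import Elasticsearch
#
# client = Elasticsearch("http://localhost:9200")
# # Goes into the URL: POST /my-index/_search?sort=%40timestamp%3Aasc
# client.search(index="my-index", sort="@timestamp:asc")
# # Contains a dict, so it goes into the request body instead:
# client.search(index="my-index", sort=["@timestamp:asc", {"rank": "desc"}])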

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import warnings

import pytest
import requests
from elastic_transport import RequestsHttpNode, Urllib3HttpNode
from elastic_transport.client_utils import DEFAULT
from requests.auth import HTTPBasicAuth

from elasticsearch import AsyncElasticsearch, Elasticsearch


class CustomRequestHttpNode(RequestsHttpNode):
    pass


class CustomUrllib3HttpNode(Urllib3HttpNode):
    pass


@pytest.mark.parametrize(
    "node_class", ["requests", RequestsHttpNode, CustomRequestHttpNode]
)
def test_requests_auth(node_class):
    http_auth = HTTPBasicAuth("username", "password")

    with warnings.catch_warnings(record=True) as w:
        client = Elasticsearch(
            "http://localhost:9200", http_auth=http_auth, node_class=node_class
        )

    # http_auth is deprecated for all other cases except this one.
    assert len(w) == 0

    # Instance should be forwarded directly to requests.Session.auth.
    node = client.transport.node_pool.get()
    assert isinstance(node, RequestsHttpNode)
    assert isinstance(node.session, requests.Session)
    assert node.session.auth is http_auth


@pytest.mark.parametrize("client_class", [Elasticsearch, AsyncElasticsearch])
@pytest.mark.parametrize(
    "node_class", ["urllib3", "aiohttp", None, DEFAULT, CustomUrllib3HttpNode]
)
def test_error_for_requests_auth_node_class(client_class, node_class):
    http_auth = HTTPBasicAuth("username", "password")

    with pytest.raises(ValueError) as e:
        client_class(
            "http://localhost:9200", http_auth=http_auth, node_class=node_class
        )
    assert str(e.value) == (
        "Using a custom 'requests.auth.AuthBase' class for "
        "'http_auth' must be used with node_class='requests'"
    )


def test_error_for_requests_auth_async():
    http_auth = HTTPBasicAuth("username", "password")

    with pytest.raises(ValueError) as e:
        AsyncElasticsearch(
            "http://localhost:9200", http_auth=http_auth, node_class="requests"
        )
    assert str(e.value) == (
        "Specified 'node_class' is not async, should be async instead"
    )
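
# A hedged sketch (not part of the test suite) of the pattern validated
# above: any requests 'AuthBase' instance can be handed to 'http_auth', but
# only together with node_class="requests". The AuthBase subclass below is
# illustrative, not a real plugin.
#
# import requests.auth
# from elasticsearch import Elasticsearch
#
# class StaticTokenAuth(requests.auth.AuthBase):
#     def __init__(self, token):
#         self.token = token
#
#     def __call__(self, request):
#         # requests calls this hook to mutate each outgoing request.
#         request.headers["Authorization"] = f"Bearer {self.token}"
#         return request
#
# client = Elasticsearch(
#     "http://localhost:9200",
#     http_auth=StaticTokenAuth("my-token"),
#     node_class="requests",
# )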
import warnings

import pytest

from elasticsearch import AsyncElasticsearch, Elasticsearch
from elasticsearch._sync.client.utils import _rewrite_parameters


class TestRewriteParameters:
    @property
    def calls(self):
        if not hasattr(self, "_calls"):
            self._calls = []
        return self._calls

    def options(self, *args, **kwargs):
        self.calls.append((args, kwargs))
        return self

    @_rewrite_parameters()
    def wrapped_func_default(self, *args, **kwargs):
        self.calls.append((args, kwargs))

    @_rewrite_parameters(body_name="document")
    def wrapped_func_body_name(self, *args, **kwargs):
        self.calls.append((args, kwargs))

    @_rewrite_parameters(body_fields=("query", "source"))
    def wrapped_func_body_fields(self, *args, **kwargs):
        self.calls.append((args, kwargs))

    @_rewrite_parameters(
        body_fields=("query",), ignore_deprecated_options={"api_key", "body", "params"}
    )
    def wrapped_func_ignore(self, *args, **kwargs):
        self.calls.append((args, kwargs))

    @_rewrite_parameters(
        body_fields=("source",), parameter_aliases={"_source": "source"}
    )
    def wrapped_func_aliases(self, *args, **kwargs):
        self.calls.append((args, kwargs))

    def test_default(self):
        with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_default(
                api_key=("id", "api_key"),
                query={"match_all": {}},
                params={"key": "value", "ignore": 404},
            )

        assert len(w) == 2
        assert w[0].category == DeprecationWarning
        assert (
            str(w[0].message)
            == "The 'params' parameter is deprecated and will be removed in a future version. Instead use individual parameters."
        )
        assert w[1].category == DeprecationWarning
        assert (
            str(w[1].message)
            == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead."
        )

        assert self.calls == [
            ((), {"api_key": ("id", "api_key"), "ignore_status": 404}),
            ((), {"query": {"match_all": {}}, "key": "value"}),
        ]

    def test_default_params_conflict(self):
        with pytest.raises(ValueError) as e:
            self.wrapped_func_default(
                query={"match_all": {}},
                params={"query": {"match_all": {}}},
            )
        assert str(e.value) == (
            "Received multiple values for 'query', specify parameters directly instead of using 'params'"
        )

    def test_body_name_using_body(self):
        with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_body_name(
                api_key=("id", "api_key"), body={"query": {"match_all": {}}}
            )

        assert len(w) == 1
        assert w[0].category == DeprecationWarning
        assert (
            str(w[0].message)
            == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead."
        )

        assert self.calls == [
            ((), {"api_key": ("id", "api_key")}),
            ((), {"document": {"query": {"match_all": {}}}}),
        ]

    def test_body_name(self):
        with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_body_name(
                api_key=("id", "api_key"), document={"query": {"match_all": {}}}
            )

        assert len(w) == 1
        assert w[0].category == DeprecationWarning
        assert (
            str(w[0].message)
            == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead."
        )

        assert self.calls == [
            ((), {"api_key": ("id", "api_key")}),
            ((), {"document": {"query": {"match_all": {}}}}),
        ]

    def test_body_name_duplicate(self):
        with pytest.raises(TypeError) as e:
            self.wrapped_func_body_name(body={}, document={})

        assert str(e.value) == (
            "Can't use 'document' and 'body' parameters together because 'document' is an alias for 'body'. "
            "Instead you should only use the 'document' parameter. See https://github.com/elastic/elasticsearch-py"
            "/issues/1698 for more information"
        )

    def test_body_fields(self):
        with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_body_fields(
                api_key=("id", "api_key"), body={"query": {"match_all": {}}}
            )

        assert len(w) == 1
        assert w[0].category == DeprecationWarning
        assert (
            str(w[0].message)
            == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead."
        )

        assert self.calls == [
            ((), {"api_key": ("id", "api_key")}),
            ((), {"body": {"query": {"match_all": {}}}}),
        ]

    @pytest.mark.parametrize(
        "body, kwargs",
        [
            ('{"query": {"match_all": {}}}', {"query": {"match_all": {}}}),
            (b'{"query": {"match_all": {}}}', {"query": {"match_all": {}}}),
        ],
    )
    def test_error_on_body_merge(self, body, kwargs):
        with pytest.raises(ValueError) as e:
            self.wrapped_func_body_fields(body=body, **kwargs)
        assert str(e.value) == (
            "Couldn't merge 'body' with other parameters as it wasn't a mapping."
        )

    @pytest.mark.parametrize(
        "params", ['{"query": {"match_all": {}}}', b'{"query": {"match_all": {}}}']
    )
    def test_error_on_params_merge(self, params):
        with pytest.raises(ValueError) as e:
            self.wrapped_func_body_fields(params=params)
        assert str(e.value) == (
            "Couldn't merge 'params' with other parameters as it wasn't a mapping. Instead of "
            "using 'params' use individual API parameters"
        )

    def test_body_fields_merge(self):
        with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_body_fields(source=False, body={"query": {}})

        assert len(w) == 1
        assert w[0].category == DeprecationWarning
        assert str(w[0].message) == (
            "Received 'source' via a specific parameter in the presence of a "
            "'body' parameter, which is deprecated and will be removed in a future "
            "version. Instead, use only 'body' or only specific parameters."
        )

    def test_body_fields_conflict(self):
        with pytest.raises(ValueError) as e:
            self.wrapped_func_body_fields(query={"match_all": {}}, body={"query": {}})

        assert str(e.value) == (
            "Received multiple values for 'query', specify parameters using either body or parameters, not both."
        )

    def test_ignore_deprecated_options(self):
        with warnings.catch_warnings(record=True) as w:
            self.wrapped_func_ignore(
                api_key=("id", "api_key"),
                body={"query": {"match_all": {}}},
                params={"key": "value"},
                param=1,
                http_auth=("key", "value"),
            )

        assert len(w) == 1
        assert w[0].category == DeprecationWarning
        assert (
            str(w[0].message)
            == "Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead."
        )

        assert self.calls == [
            ((), {"http_auth": ("key", "value")}),
            (
                (),
                {
                    "api_key": ("id", "api_key"),
                    "body": {"query": {"match_all": {}}},
                    "params": {"key": "value"},
                    "param": 1,
                },
            ),
        ]

    def test_parameter_aliases(self):
        self.wrapped_func_aliases(_source=["key1", "key2"])
        assert self.calls == [((), {"source": ["key1", "key2"]})]

        self.wrapped_func_aliases(source=["key3"])
        assert self.calls[-1] == ((), {"source": ["key3"]})

    def test_parameter_aliases_body(self):
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Using 'source' alias in 'body' is deprecated and will be removed in a future version of elasticsearch-py. "
                "Use '_source' directly instead."
            ),
        ):
            self.wrapped_func_aliases(body={"source": ["key4"]})

        # using the correct name does not warn
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            self.wrapped_func_aliases(body={"_source": ["key4"]})

    def test_parameter_aliases_body_param(self):
        with pytest.warns(
            DeprecationWarning,
            match=(
                "Received 'source' via a specific parameter in the presence of a "
                "'body' parameter, which is deprecated and will be removed in a future "
                "version. Instead, use only 'body' or only specific parameters."
            ),
        ):
            self.wrapped_func_aliases(
                source=["key4"], body={"query": {"match_all": {}}}
            )

        # using the correct name does not warn
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            self.wrapped_func_aliases(
                body={"query": {"match_all": {}}, "_source": ["key4"]}
            )

    @pytest.mark.parametrize("client_cls", [Elasticsearch, AsyncElasticsearch])
    def test_positional_argument_error(self, client_cls):
        client = client_cls("https://localhost:9200")

        with pytest.raises(TypeError) as e:
            client.search("index")
        assert str(e.value) == (
            "Positional arguments can't be used with Elasticsearch API methods. "
            "Instead only use keyword arguments."
        )

        with pytest.raises(TypeError) as e:
            client.indices.exists("index")
        assert str(e.value) == (
            "Positional arguments can't be used with Elasticsearch API methods. "
            "Instead only use keyword arguments."
        )
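
# A short sketch (not part of the test suite) of the calling conventions
# that _rewrite_parameters enables on the public API: top-level keyword
# arguments are folded into the request body, so the legacy 'body' dict and
# the typed parameters below produce the same search. Names are placeholders.
#
# from elasticsearch import Elasticsearch
#
# client = Elasticsearch("http://localhost:9200")
# # Legacy style, still accepted:
# client.search(index="my-index", body={"query": {"match_all": {}}, "from": 10})
# # Preferred style; 'from_' aliases the reserved word 'from':
# client.search(index="my-index", query={"match_all": {}}, from_=10)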

python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_serializers.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

from elasticsearch import Elasticsearch
from test_elasticsearch.test_cases import DummyTransportTestCase

EXPECTED_SERIALIZERS = {
    "application/json",
    "text/*",
    "application/x-ndjson",
    "application/vnd.mapbox-vector-tile",
    "application/vnd.elasticsearch+json",
    "application/vnd.elasticsearch+x-ndjson",
}

try:
    import pyarrow as pa

    EXPECTED_SERIALIZERS.add("application/vnd.apache.arrow.stream")
except ImportError:
    pa = None


class TestSerializers(DummyTransportTestCase):
    def test_compat_mode_on_by_default(self):
        calls = self.client.transport.calls

        # Get, never uses a body
        self.client.get(index="test0", id="1")
        assert len(calls) == 1
        assert calls[("GET", "/test0/_doc/1")][0]["headers"] == {
            "Accept": "application/vnd.elasticsearch+json; compatible-with=9"
        }

        # Search with body
        self.client.search(index="test1", query={"match_all": {}})
        assert len(calls) == 2
        assert calls[("POST", "/test1/_search")][0]["headers"] == {
            "Accept": "application/vnd.elasticsearch+json; compatible-with=9",
            "Content-Type": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        # Search without body
        self.client.search(index="test2")
        assert len(calls) == 3
        assert calls[("POST", "/test2/_search")][0]["headers"] == {
            "Accept": "application/vnd.elasticsearch+json; compatible-with=9",
        }

        # Multiple mimetypes in Accept
        self.client.cat.nodes()
        assert len(calls) == 4
        assert calls[("GET", "/_cat/nodes")][0]["headers"] == {
            # text/plain isn't modified.
            "Accept": "text/plain,application/vnd.elasticsearch+json; compatible-with=9",
        }

        # Bulk uses x-ndjson
        self.client.bulk(operations=[])
        assert len(calls) == 5
        assert calls[("PUT", "/_bulk")][0]["headers"] == {
            "Accept": "application/vnd.elasticsearch+json; compatible-with=9",
            "Content-Type": "application/vnd.elasticsearch+x-ndjson; compatible-with=9",
        }

        # Mapbox vector tiles
        self.client.search_mvt(
            index="test3",
            field="field",
            zoom="z",
            y="y",
            x="x",
            query={"match_all": {}},
        )
        assert len(calls) == 6
        assert calls[("POST", "/test3/_mvt/field/z/x/y")][0]["headers"] == {
            "Accept": "application/vnd.elasticsearch+vnd.mapbox-vector-tile; compatible-with=9",
            "Content-Type": "application/vnd.elasticsearch+json; compatible-with=9",
        }

    @pytest.mark.parametrize("mime_subtype", ["json", "x-ndjson"])
    def test_compat_serializers_used_when_given_non_compat(
        self, mime_subtype: str
    ) -> None:
        class CustomSerializer:
            pass

        ser = CustomSerializer()
        client = Elasticsearch(
            "https://localhost:9200", serializers={f"application/{mime_subtype}": ser}
        )
        serializers = client.transport.serializers.serializers
        assert set(serializers.keys()) == EXPECTED_SERIALIZERS
        assert serializers[f"application/{mime_subtype}"] is ser
        assert serializers[f"application/vnd.elasticsearch+{mime_subtype}"] is ser

    @pytest.mark.parametrize("mime_subtype", ["json", "x-ndjson"])
    def test_compat_serializers_used_when_given_compat(self, mime_subtype: str) -> None:
        class CustomSerializer:
            pass

        ser1 = CustomSerializer()
        ser2 = CustomSerializer()
        client = Elasticsearch(
            "https://localhost:9200",
            serializers={
                f"application/{mime_subtype}": ser1,
                f"application/vnd.elasticsearch+{mime_subtype}": ser2,
            },
        )
        serializers = client.transport.serializers.serializers
        assert set(serializers.keys()) == EXPECTED_SERIALIZERS
        assert serializers[f"application/{mime_subtype}"] is ser1
        assert serializers[f"application/vnd.elasticsearch+{mime_subtype}"] is ser2

    def test_compat_serializer_used_when_given_non_compat(self) -> None:
        class CustomSerializer:
            mimetype: str = "application/json"

        ser = CustomSerializer()
        client = Elasticsearch("https://localhost:9200", serializer=ser)
        serializers = client.transport.serializers.serializers
        assert set(serializers.keys()) == EXPECTED_SERIALIZERS
        assert serializers["application/json"] is ser
        assert serializers["application/vnd.elasticsearch+json"] is ser
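
# A hedged sketch (not part of the test suite) of the custom-serializer hook
# covered above, following the pattern from the elasticsearch-py
# serialization docs: subclassing JsonSerializer and overriding default()
# teaches the client to encode extra Python types. The set handling here is
# illustrative only.
#
# from elasticsearch import Elasticsearch, JsonSerializer
#
# class SetFriendlySerializer(JsonSerializer):
#     def default(self, data):
#         # Called for values the JSON encoder can't handle natively.
#         if isinstance(data, set):
#             return sorted(data)
#         return super().default(data)
#
# client = Elasticsearch("http://localhost:9200",
#                        serializer=SetFriendlySerializer())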

python-elasticsearch-9.1.1/test_elasticsearch/test_client/test_utils.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import warnings

import pytest

from elasticsearch._sync.client.utils import Stability, _quote, _stability_warning
from elasticsearch.exceptions import GeneralAvailabilityWarning


def test_handles_ascii():
    string = "abc123"
    assert "abc123" == _quote(string)


def test_handles_bytestring():
    string = b"celery-task-meta-c4f1201f-eb7b-41d5-9318-a75a8cfbdaa0"
    assert string.decode() == _quote(string)


def test_handles_unicode():
    assert "some-index-type-%E4%B8%AD%E6%96%87" == _quote("some-index-type-中文")


def test_handles_unicode2():
    string = "中*文,"
    assert "%E4%B8%AD*%E6%96%87," == _quote(string)


class TestStabilityWarning:
    def test_default(self):
        @_stability_warning(stability=Stability.STABLE)
        def func_default(*args, **kwargs):
            pass

        with warnings.catch_warnings():
            warnings.simplefilter("error")
            func_default()

    def test_beta(self, recwarn):
        @_stability_warning(stability=Stability.BETA)
        def func_beta(*args, **kwargs):
            pass

        with pytest.warns(
            GeneralAvailabilityWarning,
            match="This API is in beta and is subject to change.",
        ):
            func_beta()

    def test_experimental(self, recwarn):
        @_stability_warning(stability=Stability.EXPERIMENTAL)
        def func_experimental(*args, **kwargs):
            pass

        with pytest.warns(
            GeneralAvailabilityWarning,
            match="This API is in technical preview and may be changed or removed in a future release.",
        ):
            func_experimental()
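
# A hedged sketch (not part of the test suite) of how the
# GeneralAvailabilityWarning raised by _stability_warning surfaces to
# callers, and how to silence it for a beta API a caller has knowingly
# opted into. The API call itself is a placeholder.
#
# import warnings
# from elasticsearch.exceptions import GeneralAvailabilityWarning
#
# with warnings.catch_warnings():
#     warnings.simplefilter("ignore", category=GeneralAvailabilityWarning)
#     # call a beta/technical-preview client API here
#     ...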

python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/

python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/__init__.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/

python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/__init__.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/test_document.py

# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# this file creates several documents using bad or no types because
# these are still supported and should be kept functional in spite
# of not having appropriate type hints. For that reason the comment
# below disables many mypy checks that fail as a result of this.
# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined"

import codecs
import ipaddress
import pickle
import sys
from datetime import datetime
from hashlib import md5
from typing import Any, ClassVar, Dict, List, Optional

import pytest
from pytest import raises

from elasticsearch.dsl import (
    AsyncDocument,
    Index,
    InnerDoc,
    M,
    Mapping,
    MetaField,
    Range,
    analyzer,
    field,
    mapped_field,
    utils,
)
from elasticsearch.dsl.document_base import InstrumentedField
from elasticsearch.dsl.exceptions import IllegalOperation, ValidationException


class MyInner(InnerDoc):
    old_field = field.Text()


class MyDoc(AsyncDocument):
    title = field.Keyword()
    name = field.Text()
    created_at = field.Date()
    inner = field.Object(MyInner)


class MySubDoc(MyDoc):
    name = field.Keyword()

    class Index:
        name = "default-index"


class MyDoc2(AsyncDocument):
    extra = field.Long()


class MyMultiSubDoc(MyDoc2, MySubDoc):
    pass


class Comment(InnerDoc):
    title = field.Text()
    tags = field.Keyword(multi=True)


class DocWithNested(AsyncDocument):
    comments = field.Nested(Comment)

    class Index:
        name = "test-doc-with-nested"


class SimpleCommit(AsyncDocument):
    files = field.Text(multi=True)

    class Index:
        name = "test-git"


class Secret(str):
    pass


class SecretField(field.CustomField):
    builtin_type = "text"

    def _serialize(self, data: Any) -> Any:
        return codecs.encode(data, "rot_13")

    def _deserialize(self, data: Any) -> Any:
        if isinstance(data, Secret):
            return data
        return Secret(codecs.decode(data, "rot_13"))


class SecretDoc(AsyncDocument):
    title = SecretField(index="no")

    class Index:
        name = "test-secret-doc"


class NestedSecret(AsyncDocument):
    secrets = field.Nested(SecretDoc)

    class Index:
        name = "test-nested-secret"


class OptionalObjectWithRequiredField(AsyncDocument):
    comments = field.Nested(properties={"title": field.Keyword(required=True)})

    class Index:
        name = "test-required"


class Host(AsyncDocument):
    ip = field.Ip()

    class Index:
        name = "test-host"
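
# A compact sketch (not part of the test suite) of how the documents defined
# above are used outside of tests: instances validate with full_clean(),
# serialize with to_dict(), and, given a configured async connection, persist
# via the AsyncDocument CRUD API. The index name is a placeholder.
#
# doc = MyDoc(title="hello", name="world", inner=MyInner(old_field="of"))
# doc.full_clean()  # field validation only; no cluster round-trip
# assert doc.to_dict() == {
#     "title": "hello",
#     "name": "world",
#     "inner": {"old_field": "of"},
# }
# # Against a real cluster one would then:
# #     await doc.save(index="my-index")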
test_assigning_attrlist_to_field() -> None: sc = SimpleCommit() l = ["README", "README.rst"] sc.files = utils.AttrList(l) assert sc.to_dict()["files"] is l def test_optional_inner_objects_are_not_validated_if_missing() -> None: d = OptionalObjectWithRequiredField() d.full_clean() def test_custom_field() -> None: s = SecretDoc(title=Secret("Hello")) assert {"title": "Uryyb"} == s.to_dict() assert s.title == "Hello" s = SecretDoc.from_es({"_source": {"title": "Uryyb"}}) assert s.title == "Hello" assert isinstance(s.title, Secret) def test_custom_field_mapping() -> None: assert { "properties": {"title": {"index": "no", "type": "text"}} } == SecretDoc._doc_type.mapping.to_dict() def test_custom_field_in_nested() -> None: s = NestedSecret() s.secrets.append(SecretDoc(title=Secret("Hello"))) assert {"secrets": [{"title": "Uryyb"}]} == s.to_dict() assert s.secrets[0].title == "Hello" def test_multi_works_after_doc_has_been_saved() -> None: c = SimpleCommit() c.full_clean() c.files.append("setup.py") assert c.to_dict() == {"files": ["setup.py"]} def test_multi_works_in_nested_after_doc_has_been_serialized() -> None: # Issue #359 c = DocWithNested(comments=[Comment(title="First!")]) assert [] == c.comments[0].tags assert {"comments": [{"title": "First!"}]} == c.to_dict() assert [] == c.comments[0].tags def test_null_value_for_object() -> None: d = MyDoc(inner=None) assert d.inner is None def test_inherited_doc_types_can_override_index() -> None: class MyDocDifferentIndex(MySubDoc): class Index: name = "not-default-index" settings = {"number_of_replicas": 0} aliases: Dict[str, Any] = {"a": {}} analyzers = [analyzer("my_analizer", tokenizer="keyword")] assert MyDocDifferentIndex._index._name == "not-default-index" assert MyDocDifferentIndex()._get_index() == "not-default-index" assert MyDocDifferentIndex._index.to_dict() == { "aliases": {"a": {}}, "mappings": { "properties": { "created_at": {"type": "date"}, "inner": { "type": "object", "properties": {"old_field": {"type": "text"}}, }, "name": {"type": "keyword"}, "title": {"type": "keyword"}, } }, "settings": { "analysis": { "analyzer": {"my_analizer": {"tokenizer": "keyword", "type": "custom"}} }, "number_of_replicas": 0, }, } def test_to_dict_with_meta() -> None: d = MySubDoc(title="hello") d.meta.routing = "some-parent" assert { "_index": "default-index", "_routing": "some-parent", "_source": {"title": "hello"}, } == d.to_dict(True) def test_to_dict_with_meta_includes_custom_index() -> None: d = MySubDoc(title="hello") d.meta.index = "other-index" assert {"_index": "other-index", "_source": {"title": "hello"}} == d.to_dict(True) def test_to_dict_without_skip_empty_will_include_empty_fields() -> None: d = MySubDoc(tags=[], title=None, inner={}) assert {} == d.to_dict() assert {"tags": [], "title": None, "inner": {}} == d.to_dict(skip_empty=False) def test_attribute_can_be_removed() -> None: d = MyDoc(title="hello") del d.title assert "title" not in d._d_ def test_doc_type_can_be_correctly_pickled() -> None: d = DocWithNested( title="Hello World!", comments=[Comment(title="hellp")], meta={"id": 42} ) s = pickle.dumps(d) d2 = pickle.loads(s) assert d2 == d assert 42 == d2.meta.id assert "Hello World!" 
== d2.title assert [{"title": "hellp"}] == d2.comments assert isinstance(d2.comments[0], Comment) def test_meta_is_accessible_even_on_empty_doc() -> None: d = MyDoc() d.meta d = MyDoc(title="aaa") d.meta def test_meta_field_mapping() -> None: class User(AsyncDocument): username = field.Text() class Meta: all = MetaField(enabled=False) _index = MetaField(enabled=True) dynamic = MetaField("strict") dynamic_templates = MetaField([42]) assert { "properties": {"username": {"type": "text"}}, "_all": {"enabled": False}, "_index": {"enabled": True}, "dynamic": "strict", "dynamic_templates": [42], } == User._doc_type.mapping.to_dict() def test_multi_value_fields() -> None: class Blog(AsyncDocument): tags = field.Keyword(multi=True) b = Blog() assert [] == b.tags b.tags.append("search") b.tags.append("python") assert ["search", "python"] == b.tags def test_docs_with_properties() -> None: class User(AsyncDocument): pwd_hash: str = field.Text() def check_password(self, pwd: bytes) -> bool: return md5(pwd).hexdigest() == self.pwd_hash @property def password(self) -> None: raise AttributeError("readonly") @password.setter def password(self, pwd: bytes) -> None: self.pwd_hash = md5(pwd).hexdigest() u = User(pwd_hash=md5(b"secret").hexdigest()) assert u.check_password(b"secret") assert not u.check_password(b"not-secret") u.password = b"not-secret" assert "password" not in u._d_ assert not u.check_password(b"secret") assert u.check_password(b"not-secret") with raises(AttributeError): u.password def test_nested_can_be_assigned_to() -> None: d1 = DocWithNested(comments=[Comment(title="First!")]) d2 = DocWithNested() d2.comments = d1.comments assert isinstance(d1.comments[0], Comment) assert d2.comments == [{"title": "First!"}] assert {"comments": [{"title": "First!"}]} == d2.to_dict() assert isinstance(d2.comments[0], Comment) def test_nested_can_be_none() -> None: d = DocWithNested(comments=None, title="Hello World!") assert {"title": "Hello World!"} == d.to_dict() def test_nested_defaults_to_list_and_can_be_updated() -> None: md = DocWithNested() assert [] == md.comments md.comments.append({"title": "hello World!"}) assert {"comments": [{"title": "hello World!"}]} == md.to_dict() def test_to_dict_is_recursive_and_can_cope_with_multi_values() -> None: md = MyDoc(name=["a", "b", "c"]) md.inner = [MyInner(old_field="of1"), MyInner(old_field="of2")] assert isinstance(md.inner[0], MyInner) assert { "name": ["a", "b", "c"], "inner": [{"old_field": "of1"}, {"old_field": "of2"}], } == md.to_dict() def test_to_dict_ignores_empty_collections() -> None: md = MySubDoc(name="", address={}, count=0, valid=False, tags=[]) assert {"name": "", "count": 0, "valid": False} == md.to_dict() def test_declarative_mapping_definition() -> None: assert issubclass(MyDoc, AsyncDocument) assert hasattr(MyDoc, "_doc_type") assert { "properties": { "created_at": {"type": "date"}, "name": {"type": "text"}, "title": {"type": "keyword"}, "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, } } == MyDoc._doc_type.mapping.to_dict() def test_you_can_supply_own_mapping_instance() -> None: class MyD(AsyncDocument): title = field.Text() class Meta: mapping = Mapping() mapping.meta("_all", enabled=False) assert { "_all": {"enabled": False}, "properties": {"title": {"type": "text"}}, } == MyD._doc_type.mapping.to_dict() def test_document_can_be_created_dynamically() -> None: n = datetime.now() md = MyDoc(title="hello") md.name = "My Fancy Document!" 
md.created_at = n inner = md.inner # consistent returns assert inner is md.inner inner.old_field = "Already defined." md.inner.new_field = ["undefined", "field"] assert { "title": "hello", "name": "My Fancy Document!", "created_at": n, "inner": {"old_field": "Already defined.", "new_field": ["undefined", "field"]}, } == md.to_dict() def test_invalid_date_will_raise_exception() -> None: md = MyDoc() md.created_at = "not-a-date" with raises(ValidationException): md.full_clean() def test_document_inheritance() -> None: assert issubclass(MySubDoc, MyDoc) assert issubclass(MySubDoc, AsyncDocument) assert hasattr(MySubDoc, "_doc_type") assert { "properties": { "created_at": {"type": "date"}, "name": {"type": "keyword"}, "title": {"type": "keyword"}, "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, } } == MySubDoc._doc_type.mapping.to_dict() def test_child_class_can_override_parent() -> None: class A(AsyncDocument): o = field.Object(dynamic=False, properties={"a": field.Text()}) class B(A): o = field.Object(dynamic="strict", properties={"b": field.Text()}) assert { "properties": { "o": { "dynamic": "strict", "properties": {"a": {"type": "text"}, "b": {"type": "text"}}, "type": "object", } } } == B._doc_type.mapping.to_dict() def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict() -> None: md = MySubDoc(meta={"id": 42}, name="My First doc!") md.meta.index = "my-index" assert md.meta.index == "my-index" assert md.meta.id == 42 assert {"name": "My First doc!"} == md.to_dict() assert {"id": 42, "index": "my-index"} == md.meta.to_dict() def test_index_inheritance() -> None: assert issubclass(MyMultiSubDoc, MySubDoc) assert issubclass(MyMultiSubDoc, MyDoc2) assert issubclass(MyMultiSubDoc, AsyncDocument) assert hasattr(MyMultiSubDoc, "_doc_type") assert hasattr(MyMultiSubDoc, "_index") assert { "properties": { "created_at": {"type": "date"}, "name": {"type": "keyword"}, "title": {"type": "keyword"}, "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, "extra": {"type": "long"}, } } == MyMultiSubDoc._doc_type.mapping.to_dict() def test_meta_fields_can_be_set_directly_in_init() -> None: p = object() md = MyDoc(_id=p, title="Hello World!") assert md.meta.id is p @pytest.mark.asyncio async def test_save_no_index(async_mock_client: Any) -> None: md = MyDoc() with raises(ValidationException): await md.save(using="mock") @pytest.mark.asyncio async def test_delete_no_index(async_mock_client: Any) -> None: md = MyDoc() with raises(ValidationException): await md.delete(using="mock") @pytest.mark.asyncio async def test_update_no_fields() -> None: md = MyDoc() with raises(IllegalOperation): await md.update() def test_search_with_custom_alias_and_index() -> None: search_object = MyDoc.search( using="staging", index=["custom_index1", "custom_index2"] ) assert search_object._using == "staging" assert search_object._index == ["custom_index1", "custom_index2"] def test_from_es_respects_underscored_non_meta_fields() -> None: doc = { "_index": "test-index", "_id": "elasticsearch", "_score": 12.0, "fields": {"hello": "world", "_routing": "es", "_tags": ["search"]}, "_source": { "city": "Amsterdam", "name": "Elasticsearch", "_tagline": "You know, for search", }, } class Company(AsyncDocument): class Index: name = "test-company" c = Company.from_es(doc) assert c.meta.fields._tags == ["search"] assert c.meta.fields._routing == "es" assert c._tagline == "You know, for search" def test_nested_and_object_inner_doc() -> None: class MySubDocWithNested(MyDoc): nested_inner 
= field.Nested(MyInner) props = MySubDocWithNested._doc_type.mapping.to_dict()["properties"] assert props == { "created_at": {"type": "date"}, "inner": {"properties": {"old_field": {"type": "text"}}, "type": "object"}, "name": {"type": "text"}, "nested_inner": { "properties": {"old_field": {"type": "text"}}, "type": "nested", }, "title": {"type": "keyword"}, } def test_doc_with_type_hints() -> None: class TypedInnerDoc(InnerDoc): st: M[str] dt: M[Optional[datetime]] li: M[List[int]] class TypedDoc(AsyncDocument): st: str dt: Optional[datetime] li: List[int] ob: TypedInnerDoc ns: List[TypedInnerDoc] ip: Optional[str] = field.Ip() k1: str = field.Keyword(required=True) k2: M[str] = field.Keyword() k3: str = mapped_field(field.Keyword(), default="foo") k4: M[Optional[str]] = mapped_field(field.Keyword()) # type: ignore[misc] s1: Secret = SecretField() s2: M[Secret] = SecretField() s3: Secret = mapped_field(SecretField()) # type: ignore[misc] s4: M[Optional[Secret]] = mapped_field( SecretField(), default_factory=lambda: "foo" ) i1: ClassVar i2: ClassVar[int] props = TypedDoc._doc_type.mapping.to_dict()["properties"] assert props == { "st": {"type": "text"}, "dt": {"type": "date"}, "li": {"type": "integer"}, "ob": { "type": "object", "properties": { "st": {"type": "text"}, "dt": {"type": "date"}, "li": {"type": "integer"}, }, }, "ns": { "type": "nested", "properties": { "st": {"type": "text"}, "dt": {"type": "date"}, "li": {"type": "integer"}, }, }, "ip": {"type": "ip"}, "k1": {"type": "keyword"}, "k2": {"type": "keyword"}, "k3": {"type": "keyword"}, "k4": {"type": "keyword"}, "s1": {"type": "text"}, "s2": {"type": "text"}, "s3": {"type": "text"}, "s4": {"type": "text"}, } TypedDoc.i1 = "foo" TypedDoc.i2 = 123 doc = TypedDoc() assert doc.k3 == "foo" assert doc.s4 == "foo" with raises(ValidationException) as exc_info: doc.full_clean() assert set(exc_info.value.args[0].keys()) == { "st", "k1", "k2", "ob", "s1", "s2", "s3", } assert TypedDoc.i1 == "foo" assert TypedDoc.i2 == 123 doc.st = "s" doc.li = [1, 2, 3] doc.k1 = "k1" doc.k2 = "k2" doc.ob.st = "s" doc.ob.li = [1] doc.s1 = "s1" doc.s2 = "s2" doc.s3 = "s3" doc.full_clean() doc.ob = TypedInnerDoc(li=[1]) with raises(ValidationException) as exc_info: doc.full_clean() assert set(exc_info.value.args[0].keys()) == {"ob"} assert set(exc_info.value.args[0]["ob"][0].args[0].keys()) == {"st"} doc.ob.st = "s" doc.ns.append(TypedInnerDoc(li=[1, 2])) with raises(ValidationException) as exc_info: doc.full_clean() doc.ns[0].st = "s" doc.full_clean() doc.ip = "1.2.3.4" n = datetime.now() doc.dt = n assert doc.to_dict() == { "st": "s", "li": [1, 2, 3], "dt": n, "ob": { "st": "s", "li": [1], }, "ns": [ { "st": "s", "li": [1, 2], } ], "ip": "1.2.3.4", "k1": "k1", "k2": "k2", "k3": "foo", "s1": "s1", "s2": "s2", "s3": "s3", "s4": "foo", } s = TypedDoc.search().sort(TypedDoc.st, -TypedDoc.dt, +TypedDoc.ob.st) s.aggs.bucket("terms_agg", "terms", field=TypedDoc.k1) assert s.to_dict() == { "aggs": {"terms_agg": {"terms": {"field": "k1"}}}, "sort": ["st", {"dt": {"order": "desc"}}, "ob.st"], } @pytest.mark.skipif(sys.version_info < (3, 10), reason="requires Python 3.10") def test_doc_with_pipe_type_hints() -> None: with pytest.raises(TypeError): class BadlyTypedDoc(AsyncDocument): s: str f: str | int | None # type: ignore[syntax] class TypedDoc(AsyncDocument): s: str f1: str | None # type: ignore[syntax] f2: M[int | None] # type: ignore[syntax] f3: M[datetime | None] # type: ignore[syntax] props = TypedDoc._doc_type.mapping.to_dict()["properties"] assert props == { 
"s": {"type": "text"}, "f1": {"type": "text"}, "f2": {"type": "integer"}, "f3": {"type": "date"}, } doc = TypedDoc() with raises(ValidationException) as exc_info: doc.full_clean() assert set(exc_info.value.args[0].keys()) == {"s"} doc.s = "s" doc.full_clean() def test_instrumented_field() -> None: class Child(InnerDoc): st: M[str] class Doc(AsyncDocument): st: str ob: Child ns: List[Child] doc = Doc( st="foo", ob=Child(st="bar"), ns=[ Child(st="baz"), Child(st="qux"), ], ) assert type(doc.st) is str assert doc.st == "foo" assert type(doc.ob) is Child assert doc.ob.st == "bar" assert type(doc.ns) is utils.AttrList assert doc.ns[0].st == "baz" assert doc.ns[1].st == "qux" assert type(doc.ns[0]) is Child assert type(doc.ns[1]) is Child assert type(Doc.st) is InstrumentedField assert str(Doc.st) == "st" assert +Doc.st == "st" assert -Doc.st == "-st" assert Doc.st.to_dict() == {"type": "text"} with raises(AttributeError): Doc.st.something assert type(Doc.ob) is InstrumentedField assert str(Doc.ob) == "ob" assert str(Doc.ob.st) == "ob.st" assert +Doc.ob.st == "ob.st" assert -Doc.ob.st == "-ob.st" assert Doc.ob.st.to_dict() == {"type": "text"} with raises(AttributeError): Doc.ob.something with raises(AttributeError): Doc.ob.st.something assert type(Doc.ns) is InstrumentedField assert str(Doc.ns) == "ns" assert str(Doc.ns.st) == "ns.st" assert +Doc.ns.st == "ns.st" assert -Doc.ns.st == "-ns.st" assert Doc.ns.st.to_dict() == {"type": "text"} with raises(AttributeError): Doc.ns.something with raises(AttributeError): Doc.ns.st.something python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/test_faceted_search.py000066400000000000000000000146611506101734100311600ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from datetime import datetime import pytest from elasticsearch.dsl.faceted_search import ( AsyncFacetedSearch, DateHistogramFacet, TermsFacet, ) class BlogSearch(AsyncFacetedSearch): doc_types = ["user", "post"] fields = [ "title^5", "body", ] facets = { "category": TermsFacet(field="category.raw"), "tags": TermsFacet(field="tags"), } def test_query_is_created_properly() -> None: bs = BlogSearch("python search") s = bs.build_search() assert s._doc_type == ["user", "post"] assert { "aggs": { "_filter_tags": { "filter": {"match_all": {}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"match_all": {}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "highlight": {"fields": {"body": {}, "title": {}}}, } == s.to_dict() def test_query_is_created_properly_with_sort_tuple() -> None: bs = BlogSearch("python search", sort=("category", "-title")) s = bs.build_search() assert s._doc_type == ["user", "post"] assert { "aggs": { "_filter_tags": { "filter": {"match_all": {}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"match_all": {}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "highlight": {"fields": {"body": {}, "title": {}}}, "sort": ["category", {"title": {"order": "desc"}}], } == s.to_dict() def test_filter_is_applied_to_search_but_not_relevant_facet() -> None: bs = BlogSearch("python search", filters={"category": "elastic"}) s = bs.build_search() assert { "aggs": { "_filter_tags": { "filter": {"terms": {"category.raw": ["elastic"]}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"match_all": {}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "post_filter": {"terms": {"category.raw": ["elastic"]}}, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "highlight": {"fields": {"body": {}, "title": {}}}, } == s.to_dict() def test_filters_are_applied_to_search_and_relevant_facets() -> None: bs = BlogSearch( "python search", filters={"category": "elastic", "tags": ["python", "django"]} ) s = bs.build_search() d = s.to_dict() # we need to test post_filter without relying on order f = d["post_filter"]["bool"].pop("must") assert len(f) == 2 assert {"terms": {"category.raw": ["elastic"]}} in f assert {"terms": {"tags": ["python", "django"]}} in f assert { "aggs": { "_filter_tags": { "filter": {"terms": {"category.raw": ["elastic"]}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"terms": {"tags": ["python", "django"]}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "post_filter": {"bool": {}}, "highlight": {"fields": {"body": {}, "title": {}}}, } == d def test_date_histogram_facet_with_1970_01_01_date() -> None: dhf = DateHistogramFacet() assert dhf.get_value({"key": None}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] assert dhf.get_value({"key": 0}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] @pytest.mark.parametrize( ["interval_type", "interval"], [ ("interval", "year"), ("calendar_interval", "year"), ("interval", "month"), ("calendar_interval", "month"), ("interval", "week"), ("calendar_interval", "week"), ("interval", "day"), ("calendar_interval", "day"),
("fixed_interval", "day"), ("interval", "hour"), ("fixed_interval", "hour"), ("interval", "1Y"), ("calendar_interval", "1Y"), ("interval", "1M"), ("calendar_interval", "1M"), ("interval", "1w"), ("calendar_interval", "1w"), ("interval", "1d"), ("calendar_interval", "1d"), ("fixed_interval", "1d"), ("interval", "1h"), ("fixed_interval", "1h"), ], ) def test_date_histogram_interval_types(interval_type: str, interval: str) -> None: dhf = DateHistogramFacet(field="@timestamp", **{interval_type: interval}) assert dhf.get_aggregation().to_dict() == { "date_histogram": { "field": "@timestamp", interval_type: interval, "min_doc_count": 0, } } dhf.get_value_filter(datetime.now()) def test_date_histogram_no_interval_keyerror() -> None: dhf = DateHistogramFacet(field="@timestamp") with pytest.raises(KeyError) as e: dhf.get_value_filter(datetime.now()) assert str(e.value) == "'interval'" def test_params_added_to_search() -> None: bs = BlogSearch("python search") assert bs._s._params == {} bs.params(routing="42") assert bs._s._params == {"routing": "42"} python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/test_index.py000066400000000000000000000131421506101734100273400ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import string from random import choice from typing import Any, Dict import pytest from pytest import raises from elasticsearch.dsl import ( AsyncDocument, AsyncIndex, AsyncIndexTemplate, Date, Text, analyzer, ) class Post(AsyncDocument): title = Text() published_from = Date() def test_multiple_doc_types_will_combine_mappings() -> None: class User(AsyncDocument): username = Text() i = AsyncIndex("i") i.document(Post) i.document(User) assert { "mappings": { "properties": { "title": {"type": "text"}, "username": {"type": "text"}, "published_from": {"type": "date"}, } } } == i.to_dict() def test_search_is_limited_to_index_name() -> None: i = AsyncIndex("my-index") s = i.search() assert s._index == ["my-index"] def test_cloned_index_has_copied_settings_and_using() -> None: client = object() i = AsyncIndex("my-index", using=client) # type: ignore[arg-type] i.settings(number_of_shards=1) i2 = i.clone("my-other-index") assert "my-other-index" == i2._name assert client is i2._using assert i._settings == i2._settings assert i._settings is not i2._settings def test_cloned_index_has_analysis_attribute() -> None: """ Regression test for Issue #582 in which `AsyncIndex.clone()` was not copying over the `_analysis` attribute. 
""" client = object() i = AsyncIndex("my-index", using=client) # type: ignore[arg-type] random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) random_analyzer = analyzer( random_analyzer_name, tokenizer="standard", filter="standard" ) i.analyzer(random_analyzer) i2 = i.clone("my-clone-index") assert i.to_dict()["settings"]["analysis"] == i2.to_dict()["settings"]["analysis"] def test_settings_are_saved() -> None: i = AsyncIndex("i") i.settings(number_of_replicas=0) i.settings(number_of_shards=1) assert {"settings": {"number_of_shards": 1, "number_of_replicas": 0}} == i.to_dict() def test_registered_doc_type_included_in_to_dict() -> None: i = AsyncIndex("i", using="alias") i.document(Post) assert { "mappings": { "properties": { "title": {"type": "text"}, "published_from": {"type": "date"}, } } } == i.to_dict() def test_registered_doc_type_included_in_search() -> None: i = AsyncIndex("i", using="alias") i.document(Post) s = i.search() assert s._doc_type == [Post] def test_aliases_add_to_object() -> None: random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) alias_dict: Dict[str, Any] = {random_alias: {}} index = AsyncIndex("i", using="alias") index.aliases(**alias_dict) assert index._aliases == alias_dict def test_aliases_returned_from_to_dict() -> None: random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) alias_dict: Dict[str, Any] = {random_alias: {}} index = AsyncIndex("i", using="alias") index.aliases(**alias_dict) assert index._aliases == index.to_dict()["aliases"] == alias_dict def test_analyzers_added_to_object() -> None: random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) random_analyzer = analyzer( random_analyzer_name, tokenizer="standard", filter="standard" ) index = AsyncIndex("i", using="alias") index.analyzer(random_analyzer) assert index._analysis["analyzer"][random_analyzer_name] == { "filter": ["standard"], "type": "custom", "tokenizer": "standard", } def test_analyzers_returned_from_to_dict() -> None: random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) random_analyzer = analyzer( random_analyzer_name, tokenizer="standard", filter="standard" ) index = AsyncIndex("i", using="alias") index.analyzer(random_analyzer) assert index.to_dict()["settings"]["analysis"]["analyzer"][ random_analyzer_name ] == {"filter": ["standard"], "type": "custom", "tokenizer": "standard"} def test_conflicting_analyzer_raises_error() -> None: i = AsyncIndex("i") i.analyzer("my_analyzer", tokenizer="whitespace", filter=["lowercase", "stop"]) with raises(ValueError): i.analyzer("my_analyzer", tokenizer="keyword", filter=["lowercase", "stop"]) def test_index_template_can_have_order() -> None: i = AsyncIndex("i-*") it = i.as_template("i", order=2) assert {"index_patterns": ["i-*"], "order": 2} == it.to_dict() @pytest.mark.asyncio async def test_index_template_save_result(async_mock_client: Any) -> None: it = AsyncIndexTemplate("test-template", "test-*") assert await it.save(using="mock") == await async_mock_client.indices.put_template() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/test_mapping.py000066400000000000000000000164461506101734100276760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from elasticsearch.dsl import AsyncMapping, Keyword, Nested, Text, analysis def test_mapping_can_has_fields() -> None: m = AsyncMapping() m.field("name", "text").field("tags", "keyword") assert { "properties": {"name": {"type": "text"}, "tags": {"type": "keyword"}} } == m.to_dict() def test_mapping_update_is_recursive() -> None: m1 = AsyncMapping() m1.field("title", "text") m1.field("author", "object") m1.field("author", "object", properties={"name": {"type": "text"}}) m1.meta("_all", enabled=False) m1.meta("dynamic", False) m2 = AsyncMapping() m2.field("published_from", "date") m2.field("author", "object", properties={"email": {"type": "text"}}) m2.field("title", "text") m2.field("lang", "keyword") m2.meta("_analyzer", path="lang") m1.update(m2, update_only=True) assert { "_all": {"enabled": False}, "_analyzer": {"path": "lang"}, "dynamic": False, "properties": { "published_from": {"type": "date"}, "title": {"type": "text"}, "lang": {"type": "keyword"}, "author": { "type": "object", "properties": {"name": {"type": "text"}, "email": {"type": "text"}}, }, }, } == m1.to_dict() def test_properties_can_iterate_over_all_the_fields() -> None: m = AsyncMapping() m.field("f1", "text", test_attr="f1", fields={"f2": Keyword(test_attr="f2")}) m.field("f3", Nested(test_attr="f3", properties={"f4": Text(test_attr="f4")})) assert {"f1", "f2", "f3", "f4"} == { f.test_attr for f in m.properties._collect_fields() } def test_mapping_can_collect_all_analyzers_and_normalizers() -> None: a1 = analysis.analyzer( "my_analyzer1", tokenizer="keyword", filter=[ "lowercase", analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), ], ) a2 = analysis.analyzer("english") a3 = analysis.analyzer("unknown_custom") a4 = analysis.analyzer( "my_analyzer2", tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], ) a5 = analysis.analyzer("my_analyzer3", tokenizer="keyword") n1 = analysis.normalizer("my_normalizer1", filter=["lowercase"]) n2 = analysis.normalizer( "my_normalizer2", filter=[ "my_filter1", "my_filter2", analysis.token_filter("my_filter3", "stop", stopwords=["e", "f"]), ], ) n3 = analysis.normalizer("unknown_custom") m = AsyncMapping() m.field( "title", "text", analyzer=a1, fields={"english": Text(analyzer=a2), "unknown": Keyword(search_analyzer=a3)}, ) m.field("comments", Nested(properties={"author": Text(analyzer=a4)})) m.field("normalized_title", "keyword", normalizer=n1) m.field("normalized_comment", "keyword", normalizer=n2) m.field("unknown", "keyword", normalizer=n3) m.meta("_all", analyzer=a5) assert { "analyzer": { "my_analyzer1": { "filter": ["lowercase", "my_filter1"], "tokenizer": "keyword", "type": "custom", }, "my_analyzer2": { "filter": ["my_filter2"], "tokenizer": "trigram", "type": "custom", }, "my_analyzer3": {"tokenizer": "keyword", "type": "custom"}, }, "normalizer": { "my_normalizer1": {"filter": ["lowercase"], "type": "custom"}, 
"my_normalizer2": { "filter": ["my_filter1", "my_filter2", "my_filter3"], "type": "custom", }, }, "filter": { "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, "my_filter3": {"stopwords": ["e", "f"], "type": "stop"}, }, "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}}, } == m._collect_analysis() assert json.loads(json.dumps(m.to_dict())) == m.to_dict() def test_mapping_can_collect_multiple_analyzers() -> None: a1 = analysis.analyzer( "my_analyzer1", tokenizer="keyword", filter=[ "lowercase", analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), ], ) a2 = analysis.analyzer( "my_analyzer2", tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], ) m = AsyncMapping() m.field("title", "text", analyzer=a1, search_analyzer=a2) m.field( "text", "text", analyzer=a1, fields={ "english": Text(analyzer=a1), "unknown": Keyword(analyzer=a1, search_analyzer=a2), }, ) assert { "analyzer": { "my_analyzer1": { "filter": ["lowercase", "my_filter1"], "tokenizer": "keyword", "type": "custom", }, "my_analyzer2": { "filter": ["my_filter2"], "tokenizer": "trigram", "type": "custom", }, }, "filter": { "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, }, "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}}, } == m._collect_analysis() def test_even_non_custom_analyzers_can_have_params() -> None: a1 = analysis.analyzer("whitespace", type="pattern", pattern=r"\\s+") m = AsyncMapping() m.field("title", "text", analyzer=a1) assert { "analyzer": {"whitespace": {"type": "pattern", "pattern": r"\\s+"}} } == m._collect_analysis() def test_resolve_field_can_resolve_multifields() -> None: m = AsyncMapping() m.field("title", "text", fields={"keyword": Keyword()}) assert isinstance(m.resolve_field("title.keyword"), Keyword) def test_resolve_nested() -> None: m = AsyncMapping() m.field("n1", "nested", properties={"n2": Nested(properties={"k1": Keyword()})}) m.field("k2", "keyword") nested, field = m.resolve_nested("n1.n2.k1") assert nested == ["n1", "n1.n2"] assert isinstance(field, Keyword) nested, field = m.resolve_nested("k2") assert nested == [] assert isinstance(field, Keyword) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/test_search.py000066400000000000000000000603551506101734100275060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from copy import deepcopy from typing import Any import pytest from pytest import raises from elasticsearch.dsl import ( AsyncEmptySearch, AsyncSearch, Document, Q, query, types, wrappers, ) from elasticsearch.dsl.exceptions import IllegalOperation def test_expand__to_dot_is_respected() -> None: s = AsyncSearch().query("match", a__b=42, _expand__to_dot=False) assert {"query": {"match": {"a__b": 42}}} == s.to_dict() @pytest.mark.asyncio async def test_execute_uses_cache() -> None: s = AsyncSearch() r = object() s._response = r # type: ignore[assignment] assert r is await s.execute() @pytest.mark.asyncio async def test_cache_can_be_ignored(async_mock_client: Any) -> None: s = AsyncSearch(using="mock") r = object() s._response = r # type: ignore[assignment] await s.execute(ignore_cache=True) async_mock_client.search.assert_awaited_once_with(index=None, body={}) @pytest.mark.asyncio async def test_iter_iterates_over_hits() -> None: s = AsyncSearch() s._response = [1, 2, 3] # type: ignore[assignment] assert [1, 2, 3] == [hit async for hit in s] def test_cache_isnt_cloned() -> None: s = AsyncSearch() s._response = object() # type: ignore[assignment] assert not hasattr(s._clone(), "_response") def test_search_starts_with_no_query() -> None: s = AsyncSearch() assert s.query._proxied is None def test_search_query_combines_query() -> None: s = AsyncSearch() s2 = s.query("match", f=42) assert s2.query._proxied == query.Match(f=42) assert s.query._proxied is None s3 = s2.query("match", f=43) assert s2.query._proxied == query.Match(f=42) assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)]) def test_query_can_be_assigned_to() -> None: s = AsyncSearch() q = Q("match", title="python") s.query = q # type: ignore assert s.query._proxied is q def test_query_can_be_wrapped() -> None: s = AsyncSearch().query("match", title="python") s.query = Q("function_score", query=s.query, field_value_factor={"field": "rating"}) # type: ignore assert { "query": { "function_score": { "functions": [{"field_value_factor": {"field": "rating"}}], "query": {"match": {"title": "python"}}, } } } == s.to_dict() def test_using() -> None: o = object() o2 = object() s = AsyncSearch(using=o) assert s._using is o s2 = s.using(o2) # type: ignore[arg-type] assert s._using is o assert s2._using is o2 def test_methods_are_proxied_to_the_query() -> None: s = AsyncSearch().query("match_all") assert s.query.to_dict() == {"match_all": {}} def test_query_always_returns_search() -> None: s = AsyncSearch() assert isinstance(s.query("match", f=42), AsyncSearch) def test_source_copied_on_clone() -> None: s = AsyncSearch().source(False) assert s._clone()._source == s._source assert s._clone()._source is False s2 = AsyncSearch().source([]) assert s2._clone()._source == s2._source assert s2._source == [] s3 = AsyncSearch().source(["some", "fields"]) assert s3._clone()._source == s3._source assert s3._clone()._source == ["some", "fields"] def test_copy_clones() -> None: from copy import copy s1 = AsyncSearch().source(["some", "fields"]) s2 = copy(s1) assert s1 == s2 assert s1 is not s2 def test_aggs_allow_two_metric() -> None: s = AsyncSearch() s.aggs.metric("a", "max", field="a").metric("b", "max", field="b") assert s.to_dict() == { "aggs": {"a": {"max": {"field": "a"}}, "b": {"max": {"field": "b"}}} } def test_aggs_get_copied_on_change() -> None: s = AsyncSearch().query("match_all") s.aggs.bucket("per_tag", "terms", field="f").metric( "max_score", "max", field="score" ) s2 = s.query("match_all") 
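# editor's note: each clone created below (s2, s3, s4) must receive a deep
# copy of the parent's aggs, so the mutations that follow cannot leak back
# into the originals; the asserts at the end of this test verify exactly that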
s2.aggs.bucket("per_month", "date_histogram", field="date", interval="month") s3 = s2.query("match_all") s3.aggs["per_month"].metric("max_score", "max", field="score") s4 = s3._clone() s4.aggs.metric("max_score", "max", field="score") d: Any = { "query": {"match_all": {}}, "aggs": { "per_tag": { "terms": {"field": "f"}, "aggs": {"max_score": {"max": {"field": "score"}}}, } }, } assert d == s.to_dict() d["aggs"]["per_month"] = {"date_histogram": {"field": "date", "interval": "month"}} assert d == s2.to_dict() d["aggs"]["per_month"]["aggs"] = {"max_score": {"max": {"field": "score"}}} assert d == s3.to_dict() d["aggs"]["max_score"] = {"max": {"field": "score"}} assert d == s4.to_dict() def test_search_index() -> None: s = AsyncSearch(index="i") assert s._index == ["i"] s = s.index("i2") assert s._index == ["i", "i2"] s = s.index("i3") assert s._index == ["i", "i2", "i3"] s = s.index() assert s._index is None s = AsyncSearch(index=("i", "i2")) assert s._index == ["i", "i2"] s = AsyncSearch(index=["i", "i2"]) assert s._index == ["i", "i2"] s = AsyncSearch() s = s.index("i", "i2") assert s._index == ["i", "i2"] s2 = s.index("i3") assert s._index == ["i", "i2"] assert s2._index == ["i", "i2", "i3"] s = AsyncSearch() s = s.index(["i", "i2"], "i3") assert s._index == ["i", "i2", "i3"] s2 = s.index("i4") assert s._index == ["i", "i2", "i3"] assert s2._index == ["i", "i2", "i3", "i4"] s2 = s.index(["i4"]) assert s2._index == ["i", "i2", "i3", "i4"] s2 = s.index(("i4", "i5")) assert s2._index == ["i", "i2", "i3", "i4", "i5"] def test_doc_type_document_class() -> None: class MyDocument(Document): pass s = AsyncSearch(doc_type=MyDocument) assert s._doc_type == [MyDocument] assert s._doc_type_map == {} s = AsyncSearch().doc_type(MyDocument) assert s._doc_type == [MyDocument] assert s._doc_type_map == {} def test_knn() -> None: s = AsyncSearch() with raises(TypeError): s.knn() # type: ignore[call-arg] with raises(TypeError): s.knn("field") # type: ignore[call-arg] with raises(TypeError): s.knn("field", 5) # type: ignore[call-arg] with raises(ValueError): s.knn("field", 5, 100) with raises(ValueError): s.knn("field", 5, 100, query_vector=[1, 2, 3], query_vector_builder={}) s = s.knn("field", 5, 100, query_vector=[1, 2, 3]) assert { "knn": { "field": "field", "k": 5, "num_candidates": 100, "query_vector": [1, 2, 3], } } == s.to_dict() s = s.knn( k=4, num_candidates=40, boost=0.8, field="name", query_vector_builder={ "text_embedding": {"model_id": "foo", "model_text": "search text"} }, inner_hits={"size": 1}, ) assert { "knn": [ { "field": "field", "k": 5, "num_candidates": 100, "query_vector": [1, 2, 3], }, { "field": "name", "k": 4, "num_candidates": 40, "query_vector_builder": { "text_embedding": {"model_id": "foo", "model_text": "search text"} }, "boost": 0.8, "inner_hits": {"size": 1}, }, ] } == s.to_dict() def test_rank() -> None: s = AsyncSearch() s.rank(rrf=False) assert {} == s.to_dict() s = s.rank(rrf=True) assert {"rank": {"rrf": {}}} == s.to_dict() s = s.rank(rrf={"window_size": 50, "rank_constant": 20}) assert {"rank": {"rrf": {"window_size": 50, "rank_constant": 20}}} == s.to_dict() def test_sort() -> None: s = AsyncSearch() s = s.sort("fielda", "-fieldb") assert ["fielda", {"fieldb": {"order": "desc"}}] == s._sort assert {"sort": ["fielda", {"fieldb": {"order": "desc"}}]} == s.to_dict() s = s.sort() assert [] == s._sort assert AsyncSearch().to_dict() == s.to_dict() def test_sort_by_score() -> None: s = AsyncSearch() s = s.sort("_score") assert {"sort": ["_score"]} == s.to_dict() s = 
AsyncSearch() with raises(IllegalOperation): s.sort("-_score") def test_collapse() -> None: s = AsyncSearch() inner_hits = {"name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}]} s = s.collapse("user.id", inner_hits=inner_hits, max_concurrent_group_searches=4) assert { "field": "user.id", "inner_hits": { "name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}], }, "max_concurrent_group_searches": 4, } == s._collapse assert { "collapse": { "field": "user.id", "inner_hits": { "name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}], }, "max_concurrent_group_searches": 4, } } == s.to_dict() s = s.collapse() assert {} == s._collapse assert AsyncSearch().to_dict() == s.to_dict() def test_slice() -> None: s = AsyncSearch() assert {"from": 3, "size": 7} == s[3:10].to_dict() assert {"size": 5} == s[:5].to_dict() assert {"from": 3} == s[3:].to_dict() assert {"from": 0, "size": 0} == s[0:0].to_dict() assert {"from": 20, "size": 0} == s[20:0].to_dict() assert {"from": 10, "size": 5} == s[10:][:5].to_dict() assert {"from": 10, "size": 0} == s[:5][10:].to_dict() assert {"size": 10} == s[:10][:40].to_dict() assert {"size": 10} == s[:40][:10].to_dict() assert {"size": 40} == s[:40][:80].to_dict() assert {"from": 12, "size": 0} == s[:5][10:][2:].to_dict() assert {"from": 15, "size": 0} == s[10:][:5][5:].to_dict() assert {} == s[:].to_dict() with raises(ValueError): s[-1:] with raises(ValueError): s[4:-1] with raises(ValueError): s[-3:-2] def test_index() -> None: s = AsyncSearch() assert {"from": 3, "size": 1} == s[3].to_dict() assert {"from": 3, "size": 1} == s[3][0].to_dict() assert {"from": 8, "size": 0} == s[3][5].to_dict() assert {"from": 4, "size": 1} == s[3:10][1].to_dict() with raises(ValueError): s[-3] def test_search_to_dict() -> None: s = AsyncSearch() assert {} == s.to_dict() s = s.query("match", f=42) assert {"query": {"match": {"f": 42}}} == s.to_dict() assert {"query": {"match": {"f": 42}}, "size": 10} == s.to_dict(size=10) s.aggs.bucket("per_tag", "terms", field="f").metric( "max_score", "max", field="score" ) d = { "aggs": { "per_tag": { "terms": {"field": "f"}, "aggs": {"max_score": {"max": {"field": "score"}}}, } }, "query": {"match": {"f": 42}}, } assert d == s.to_dict() s = AsyncSearch(extra={"size": 5}) assert {"size": 5} == s.to_dict() s = s.extra(from_=42) assert {"size": 5, "from": 42} == s.to_dict() def test_complex_example() -> None: s = AsyncSearch() s = ( s.query("match", title="python") .query(~Q("match", title="ruby")) .filter(Q("term", category="meetup") | Q("term", category="conference")) .collapse("user_id") .post_filter("terms", tags=["prague", "czech"]) .script_fields(more_attendees="doc['attendees'].value + 42") ) s.aggs.bucket("per_country", "terms", field="country").metric( "avg_attendees", "avg", field="attendees" ) s.query.minimum_should_match = 2 s = s.highlight_options(order="score").highlight("title", "body", fragment_size=50) assert { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } }, "post_filter": {"terms": {"tags": ["prague", "czech"]}}, "aggs": { "per_country": { "terms": {"field": "country"}, "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, } }, "collapse": {"field": "user_id"}, "highlight": { "order": "score", "fields": {"title": {"fragment_size": 50}, "body": {"fragment_size": 50}}, }, "script_fields": 
{"more_attendees": {"script": "doc['attendees'].value + 42"}}, } == s.to_dict() def test_reverse() -> None: d = { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [ { "bool": { "must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } } ], } }, "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}}, "aggs": { "per_country": { "terms": {"field": "country"}, "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, } }, "sort": ["title", {"category": {"order": "desc"}}, "_score"], "size": 5, "highlight": {"order": "score", "fields": {"title": {"fragment_size": 50}}}, "suggest": { "my-title-suggestions-1": { "text": "devloping distibutd saerch engies", "term": {"size": 3, "field": "title"}, } }, "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, } d2 = deepcopy(d) s = AsyncSearch.from_dict(d) # make sure we haven't modified anything in place assert d == d2 assert {"size": 5} == s._extra assert d == s.to_dict() def test_code_generated_classes() -> None: s = AsyncSearch() s = ( s.query(query.Match("title", types.MatchQuery(query="python"))) .query(~query.Match("title", types.MatchQuery(query="ruby"))) .query( query.Knn( field="title", query_vector=[1.0, 2.0, 3.0], num_candidates=10, k=3, filter=query.Range("year", wrappers.Range(gt="2004")), ) ) .filter( query.Term("category", types.TermQuery(value="meetup")) | query.Term("category", types.TermQuery(value="conference")) ) .collapse("user_id") .post_filter(query.Terms(tags=["prague", "czech"])) .script_fields(more_attendees="doc['attendees'].value + 42") ) assert { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": {"value": "meetup"}}}, {"term": {"category": {"value": "conference"}}}, ] } } ], "must": [ {"match": {"title": {"query": "python"}}}, { "knn": { "field": "title", "filter": [ { "range": { "year": { "gt": "2004", }, }, }, ], "k": 3, "num_candidates": 10, "query_vector": [ 1.0, 2.0, 3.0, ], }, }, ], "must_not": [{"match": {"title": {"query": "ruby"}}}], } }, "post_filter": {"terms": {"tags": ["prague", "czech"]}}, "collapse": {"field": "user_id"}, "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, } == s.to_dict() def test_from_dict_doesnt_need_query() -> None: s = AsyncSearch.from_dict({"size": 5}) assert {"size": 5} == s.to_dict() @pytest.mark.asyncio async def test_params_being_passed_to_search(async_mock_client: Any) -> None: s = AsyncSearch(using="mock") s = s.params(routing="42") await s.execute() async_mock_client.search.assert_awaited_once_with(index=None, body={}, routing="42") def test_source() -> None: assert {} == AsyncSearch().source().to_dict() assert { "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]} } == AsyncSearch().source(includes=["foo.bar.*"], excludes=("foo.one",)).to_dict() assert {"_source": False} == AsyncSearch().source(False).to_dict() assert {"_source": ["f1", "f2"]} == AsyncSearch().source( includes=["foo.bar.*"], excludes=["foo.one"] ).source(["f1", "f2"]).to_dict() def test_source_on_clone() -> None: assert { "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]}, "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, } == AsyncSearch().source(includes=["foo.bar.*"]).source( excludes=["foo.one"] ).filter( "term", title="python" ).to_dict() assert { "_source": False, "query": {"bool": {"filter": [{"term": {"title": 
"python"}}]}}, } == AsyncSearch().source(False).filter("term", title="python").to_dict() def test_source_on_clear() -> None: assert ( {} == AsyncSearch() .source(includes=["foo.bar.*"]) .source(includes=None, excludes=None) .to_dict() ) def test_suggest_accepts_global_text() -> None: s = AsyncSearch.from_dict( { "suggest": { "text": "the amsterdma meetpu", "my-suggest-1": {"term": {"field": "title"}}, "my-suggest-2": {"text": "other", "term": {"field": "body"}}, } } ) assert { "suggest": { "my-suggest-1": { "term": {"field": "title"}, "text": "the amsterdma meetpu", }, "my-suggest-2": {"term": {"field": "body"}, "text": "other"}, } } == s.to_dict() def test_suggest() -> None: s = AsyncSearch() s = s.suggest("my_suggestion", "pyhton", term={"field": "title"}) assert { "suggest": {"my_suggestion": {"term": {"field": "title"}, "text": "pyhton"}} } == s.to_dict() def test_exclude() -> None: s = AsyncSearch() s = s.exclude("match", title="python") assert { "query": { "bool": { "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] } } } == s.to_dict() @pytest.mark.asyncio async def test_delete_by_query(async_mock_client: Any) -> None: s = AsyncSearch(using="mock", index="i").query("match", lang="java") await s.delete() async_mock_client.delete_by_query.assert_awaited_once_with( index=["i"], body={"query": {"match": {"lang": "java"}}} ) def test_update_from_dict() -> None: s = AsyncSearch() s.update_from_dict({"indices_boost": [{"important-documents": 2}]}) s.update_from_dict({"_source": ["id", "name"]}) s.update_from_dict({"collapse": {"field": "user_id"}}) assert { "indices_boost": [{"important-documents": 2}], "_source": ["id", "name"], "collapse": {"field": "user_id"}, } == s.to_dict() def test_rescore_query_to_dict() -> None: s = AsyncSearch(index="index-name") positive_query = Q( "function_score", query=Q("term", tags="a"), script_score={"script": "_score * 1"}, ) negative_query = Q( "function_score", query=Q("term", tags="b"), script_score={"script": "_score * -100"}, ) s = s.query(positive_query) s = s.extra( rescore={"window_size": 100, "query": {"rescore_query": negative_query}} ) assert s.to_dict() == { "query": { "function_score": { "query": {"term": {"tags": "a"}}, "functions": [{"script_score": {"script": "_score * 1"}}], } }, "rescore": { "window_size": 100, "query": { "rescore_query": { "function_score": { "query": {"term": {"tags": "b"}}, "functions": [{"script_score": {"script": "_score * -100"}}], } } }, }, } assert s.to_dict( rescore={"window_size": 10, "query": {"rescore_query": positive_query}} ) == { "query": { "function_score": { "query": {"term": {"tags": "a"}}, "functions": [{"script_score": {"script": "_score * 1"}}], } }, "rescore": { "window_size": 10, "query": { "rescore_query": { "function_score": { "query": {"term": {"tags": "a"}}, "functions": [{"script_score": {"script": "_score * 1"}}], } } }, }, } @pytest.mark.asyncio async def test_empty_search() -> None: s = AsyncEmptySearch(index="index-name") s = s.query("match", lang="java") s.aggs.bucket("versions", "terms", field="version") assert await s.count() == 0 assert [hit async for hit in s] == [] assert [hit async for hit in s.scan()] == [] await s.delete() # should not error def test_suggest_completion() -> None: s = AsyncSearch() s = s.suggest("my_suggestion", "pyhton", completion={"field": "title"}) assert { "suggest": { "my_suggestion": {"completion": {"field": "title"}, "prefix": "pyhton"} } } == s.to_dict() def test_suggest_regex_query() -> None: s = AsyncSearch() s = 
s.suggest("my_suggestion", regex="py[thon|py]", completion={"field": "title"}) assert { "suggest": { "my_suggestion": {"completion": {"field": "title"}, "regex": "py[thon|py]"} } } == s.to_dict() def test_suggest_must_pass_text_or_regex() -> None: s = AsyncSearch() with raises(ValueError): s.suggest("my_suggestion") def test_suggest_can_only_pass_text_or_regex() -> None: s = AsyncSearch() with raises(ValueError): s.suggest("my_suggestion", text="python", regex="py[hton|py]") def test_suggest_regex_must_be_wtih_completion() -> None: s = AsyncSearch() with raises(ValueError): s.suggest("my_suggestion", regex="py[thon|py]") python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_async/test_update_by_query.py000066400000000000000000000127151506101734100314370ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from copy import deepcopy from typing import Any import pytest from elasticsearch.dsl import AsyncUpdateByQuery, Q from elasticsearch.dsl.response import UpdateByQueryResponse from elasticsearch.dsl.search_base import SearchBase def test_ubq_starts_with_no_query() -> None: ubq = AsyncUpdateByQuery() assert ubq.query._proxied is None def test_ubq_to_dict() -> None: ubq = AsyncUpdateByQuery() assert {} == ubq.to_dict() ubq = ubq.query("match", f=42) assert {"query": {"match": {"f": 42}}} == ubq.to_dict() assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10) ubq = AsyncUpdateByQuery(extra={"size": 5}) assert {"size": 5} == ubq.to_dict() ubq = AsyncUpdateByQuery(extra={"extra_q": Q("term", category="conference")}) assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict() def test_complex_example() -> None: ubq = AsyncUpdateByQuery() ubq = ( ubq.query("match", title="python") .query(~Q("match", title="ruby")) .filter(Q("term", category="meetup") | Q("term", category="conference")) .script( source="ctx._source.likes += params.f", lang="painless", params={"f": 3} ) ) ubq.query.minimum_should_match = 2 assert { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } }, "script": { "source": "ctx._source.likes += params.f", "lang": "painless", "params": {"f": 3}, }, } == ubq.to_dict() def test_exclude() -> None: ubq = AsyncUpdateByQuery() ubq = ubq.exclude("match", title="python") assert { "query": { "bool": { "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] } } } == ubq.to_dict() def test_reverse() -> None: d = { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [ { "bool": { "must": [{"match": {"title": "python"}}], 
"must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } } ], } }, "script": { "source": "ctx._source.likes += params.f", "lang": "painless", "params": {"f": 3}, }, } d2 = deepcopy(d) ubq = AsyncUpdateByQuery.from_dict(d) assert d == d2 assert d == ubq.to_dict() def test_from_dict_doesnt_need_query() -> None: ubq = AsyncUpdateByQuery.from_dict({"script": {"source": "test"}}) assert {"script": {"source": "test"}} == ubq.to_dict() @pytest.mark.asyncio async def test_params_being_passed_to_search(async_mock_client: Any) -> None: ubq = AsyncUpdateByQuery(using="mock", index="i") ubq = ubq.params(routing="42") await ubq.execute() async_mock_client.update_by_query.assert_called_once_with(index=["i"], routing="42") def test_overwrite_script() -> None: ubq = AsyncUpdateByQuery() ubq = ubq.script( source="ctx._source.likes += params.f", lang="painless", params={"f": 3} ) assert { "script": { "source": "ctx._source.likes += params.f", "lang": "painless", "params": {"f": 3}, } } == ubq.to_dict() ubq = ubq.script(source="ctx._source.likes++") assert {"script": {"source": "ctx._source.likes++"}} == ubq.to_dict() def test_update_by_query_response_success() -> None: ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": []}) assert ubqr.success() ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": True, "failures": []}) assert not ubqr.success() ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": [{}]}) assert not ubqr.success() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/000077500000000000000000000000001506101734100244565ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/__init__.py000066400000000000000000000014231506101734100265670ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/test_document.py000066400000000000000000000560341506101734100277150ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# this file creates several documents using bad or no types because # these are still supported and should be kept functional in spite # of not having appropriate type hints. For that reason the comment # below disables many mypy checks that fails as a result of this. # mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" import codecs import ipaddress import pickle import sys from datetime import datetime from hashlib import md5 from typing import Any, ClassVar, Dict, List, Optional import pytest from pytest import raises from elasticsearch.dsl import ( Document, Index, InnerDoc, M, Mapping, MetaField, Range, analyzer, field, mapped_field, utils, ) from elasticsearch.dsl.document_base import InstrumentedField from elasticsearch.dsl.exceptions import IllegalOperation, ValidationException class MyInner(InnerDoc): old_field = field.Text() class MyDoc(Document): title = field.Keyword() name = field.Text() created_at = field.Date() inner = field.Object(MyInner) class MySubDoc(MyDoc): name = field.Keyword() class Index: name = "default-index" class MyDoc2(Document): extra = field.Long() class MyMultiSubDoc(MyDoc2, MySubDoc): pass class Comment(InnerDoc): title = field.Text() tags = field.Keyword(multi=True) class DocWithNested(Document): comments = field.Nested(Comment) class Index: name = "test-doc-with-nested" class SimpleCommit(Document): files = field.Text(multi=True) class Index: name = "test-git" class Secret(str): pass class SecretField(field.CustomField): builtin_type = "text" def _serialize(self, data: Any) -> Any: return codecs.encode(data, "rot_13") def _deserialize(self, data: Any) -> Any: if isinstance(data, Secret): return data return Secret(codecs.decode(data, "rot_13")) class SecretDoc(Document): title = SecretField(index="no") class Index: name = "test-secret-doc" class NestedSecret(Document): secrets = field.Nested(SecretDoc) class Index: name = "test-nested-secret" class OptionalObjectWithRequiredField(Document): comments = field.Nested(properties={"title": field.Keyword(required=True)}) class Index: name = "test-required" class Host(Document): ip = field.Ip() class Index: name = "test-host" def test_range_serializes_properly() -> None: class D(Document): lr: Range[int] = field.LongRange() d = D(lr=Range(lt=42)) assert 40 in d.lr assert 47 not in d.lr assert {"lr": {"lt": 42}} == d.to_dict() d = D(lr={"lt": 42}) assert {"lr": {"lt": 42}} == d.to_dict() def test_range_deserializes_properly() -> None: class D(InnerDoc): lr = field.LongRange() d = D.from_es({"lr": {"lt": 42}}, True) assert isinstance(d.lr, Range) assert 40 in d.lr assert 47 not in d.lr def test_resolve_nested() -> None: nested, field = NestedSecret._index.resolve_nested("secrets.title") assert nested == ["secrets"] assert field is NestedSecret._doc_type.mapping["secrets"]["title"] def test_conflicting_mapping_raises_error_in_index_to_dict() -> None: class A(Document): name = field.Text() class B(Document): name = field.Keyword() i = Index("i") i.document(A) i.document(B) with raises(ValueError): i.to_dict() def test_ip_address_serializes_properly() -> None: host = Host(ip=ipaddress.IPv4Address("10.0.0.1")) assert {"ip": "10.0.0.1"} == host.to_dict() def test_matches_uses_index() -> None: assert SimpleCommit._matches({"_index": "test-git"}) assert not SimpleCommit._matches({"_index": "not-test-git"}) def test_matches_with_no_name_always_matches() -> None: class D(Document): pass assert D._matches({}) assert D._matches({"_index": "whatever"}) def 
test_matches_accepts_wildcards() -> None: class MyDoc(Document): class Index: name = "my-*" assert MyDoc._matches({"_index": "my-index"}) assert not MyDoc._matches({"_index": "not-my-index"}) def test_assigning_attrlist_to_field() -> None: sc = SimpleCommit() l = ["README", "README.rst"] sc.files = utils.AttrList(l) assert sc.to_dict()["files"] is l def test_optional_inner_objects_are_not_validated_if_missing() -> None: d = OptionalObjectWithRequiredField() d.full_clean() def test_custom_field() -> None: s = SecretDoc(title=Secret("Hello")) assert {"title": "Uryyb"} == s.to_dict() assert s.title == "Hello" s = SecretDoc.from_es({"_source": {"title": "Uryyb"}}) assert s.title == "Hello" assert isinstance(s.title, Secret) def test_custom_field_mapping() -> None: assert { "properties": {"title": {"index": "no", "type": "text"}} } == SecretDoc._doc_type.mapping.to_dict() def test_custom_field_in_nested() -> None: s = NestedSecret() s.secrets.append(SecretDoc(title=Secret("Hello"))) assert {"secrets": [{"title": "Uryyb"}]} == s.to_dict() assert s.secrets[0].title == "Hello" def test_multi_works_after_doc_has_been_saved() -> None: c = SimpleCommit() c.full_clean() c.files.append("setup.py") assert c.to_dict() == {"files": ["setup.py"]} def test_multi_works_in_nested_after_doc_has_been_serialized() -> None: # Issue #359 c = DocWithNested(comments=[Comment(title="First!")]) assert [] == c.comments[0].tags assert {"comments": [{"title": "First!"}]} == c.to_dict() assert [] == c.comments[0].tags def test_null_value_for_object() -> None: d = MyDoc(inner=None) assert d.inner is None def test_inherited_doc_types_can_override_index() -> None: class MyDocDifferentIndex(MySubDoc): class Index: name = "not-default-index" settings = {"number_of_replicas": 0} aliases: Dict[str, Any] = {"a": {}} analyzers = [analyzer("my_analizer", tokenizer="keyword")] assert MyDocDifferentIndex._index._name == "not-default-index" assert MyDocDifferentIndex()._get_index() == "not-default-index" assert MyDocDifferentIndex._index.to_dict() == { "aliases": {"a": {}}, "mappings": { "properties": { "created_at": {"type": "date"}, "inner": { "type": "object", "properties": {"old_field": {"type": "text"}}, }, "name": {"type": "keyword"}, "title": {"type": "keyword"}, } }, "settings": { "analysis": { "analyzer": {"my_analizer": {"tokenizer": "keyword", "type": "custom"}} }, "number_of_replicas": 0, }, } def test_to_dict_with_meta() -> None: d = MySubDoc(title="hello") d.meta.routing = "some-parent" assert { "_index": "default-index", "_routing": "some-parent", "_source": {"title": "hello"}, } == d.to_dict(True) def test_to_dict_with_meta_includes_custom_index() -> None: d = MySubDoc(title="hello") d.meta.index = "other-index" assert {"_index": "other-index", "_source": {"title": "hello"}} == d.to_dict(True) def test_to_dict_without_skip_empty_will_include_empty_fields() -> None: d = MySubDoc(tags=[], title=None, inner={}) assert {} == d.to_dict() assert {"tags": [], "title": None, "inner": {}} == d.to_dict(skip_empty=False) def test_attribute_can_be_removed() -> None: d = MyDoc(title="hello") del d.title assert "title" not in d._d_ def test_doc_type_can_be_correctly_pickled() -> None: d = DocWithNested( title="Hello World!", comments=[Comment(title="hellp")], meta={"id": 42} ) s = pickle.dumps(d) d2 = pickle.loads(s) assert d2 == d assert 42 == d2.meta.id assert "Hello World!" 
== d2.title assert [{"title": "hellp"}] == d2.comments assert isinstance(d2.comments[0], Comment) def test_meta_is_accessible_even_on_empty_doc() -> None: d = MyDoc() d.meta d = MyDoc(title="aaa") d.meta def test_meta_field_mapping() -> None: class User(Document): username = field.Text() class Meta: all = MetaField(enabled=False) _index = MetaField(enabled=True) dynamic = MetaField("strict") dynamic_templates = MetaField([42]) assert { "properties": {"username": {"type": "text"}}, "_all": {"enabled": False}, "_index": {"enabled": True}, "dynamic": "strict", "dynamic_templates": [42], } == User._doc_type.mapping.to_dict() def test_multi_value_fields() -> None: class Blog(Document): tags = field.Keyword(multi=True) b = Blog() assert [] == b.tags b.tags.append("search") b.tags.append("python") assert ["search", "python"] == b.tags def test_docs_with_properties() -> None: class User(Document): pwd_hash: str = field.Text() def check_password(self, pwd: bytes) -> bool: return md5(pwd).hexdigest() == self.pwd_hash @property def password(self) -> None: raise AttributeError("readonly") @password.setter def password(self, pwd: bytes) -> None: self.pwd_hash = md5(pwd).hexdigest() u = User(pwd_hash=md5(b"secret").hexdigest()) assert u.check_password(b"secret") assert not u.check_password(b"not-secret") u.password = b"not-secret" assert "password" not in u._d_ assert not u.check_password(b"secret") assert u.check_password(b"not-secret") with raises(AttributeError): u.password def test_nested_can_be_assigned_to() -> None: d1 = DocWithNested(comments=[Comment(title="First!")]) d2 = DocWithNested() d2.comments = d1.comments assert isinstance(d1.comments[0], Comment) assert d2.comments == [{"title": "First!"}] assert {"comments": [{"title": "First!"}]} == d2.to_dict() assert isinstance(d2.comments[0], Comment) def test_nested_can_be_none() -> None: d = DocWithNested(comments=None, title="Hello World!") assert {"title": "Hello World!"} == d.to_dict() def test_nested_defaults_to_list_and_can_be_updated() -> None: md = DocWithNested() assert [] == md.comments md.comments.append({"title": "hello World!"}) assert {"comments": [{"title": "hello World!"}]} == md.to_dict() def test_to_dict_is_recursive_and_can_cope_with_multi_values() -> None: md = MyDoc(name=["a", "b", "c"]) md.inner = [MyInner(old_field="of1"), MyInner(old_field="of2")] assert isinstance(md.inner[0], MyInner) assert { "name": ["a", "b", "c"], "inner": [{"old_field": "of1"}, {"old_field": "of2"}], } == md.to_dict() def test_to_dict_ignores_empty_collections() -> None: md = MySubDoc(name="", address={}, count=0, valid=False, tags=[]) assert {"name": "", "count": 0, "valid": False} == md.to_dict() def test_declarative_mapping_definition() -> None: assert issubclass(MyDoc, Document) assert hasattr(MyDoc, "_doc_type") assert { "properties": { "created_at": {"type": "date"}, "name": {"type": "text"}, "title": {"type": "keyword"}, "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, } } == MyDoc._doc_type.mapping.to_dict() def test_you_can_supply_own_mapping_instance() -> None: class MyD(Document): title = field.Text() class Meta: mapping = Mapping() mapping.meta("_all", enabled=False) assert { "_all": {"enabled": False}, "properties": {"title": {"type": "text"}}, } == MyD._doc_type.mapping.to_dict() def test_document_can_be_created_dynamically() -> None: n = datetime.now() md = MyDoc(title="hello") md.name = "My Fancy Document!" 
md.created_at = n inner = md.inner # consistent returns assert inner is md.inner inner.old_field = "Already defined." md.inner.new_field = ["undefined", "field"] assert { "title": "hello", "name": "My Fancy Document!", "created_at": n, "inner": {"old_field": "Already defined.", "new_field": ["undefined", "field"]}, } == md.to_dict() def test_invalid_date_will_raise_exception() -> None: md = MyDoc() md.created_at = "not-a-date" with raises(ValidationException): md.full_clean() def test_document_inheritance() -> None: assert issubclass(MySubDoc, MyDoc) assert issubclass(MySubDoc, Document) assert hasattr(MySubDoc, "_doc_type") assert { "properties": { "created_at": {"type": "date"}, "name": {"type": "keyword"}, "title": {"type": "keyword"}, "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, } } == MySubDoc._doc_type.mapping.to_dict() def test_child_class_can_override_parent() -> None: class A(Document): o = field.Object(dynamic=False, properties={"a": field.Text()}) class B(A): o = field.Object(dynamic="strict", properties={"b": field.Text()}) assert { "properties": { "o": { "dynamic": "strict", "properties": {"a": {"type": "text"}, "b": {"type": "text"}}, "type": "object", } } } == B._doc_type.mapping.to_dict() def test_meta_fields_are_stored_in_meta_and_ignored_by_to_dict() -> None: md = MySubDoc(meta={"id": 42}, name="My First doc!") md.meta.index = "my-index" assert md.meta.index == "my-index" assert md.meta.id == 42 assert {"name": "My First doc!"} == md.to_dict() assert {"id": 42, "index": "my-index"} == md.meta.to_dict() def test_index_inheritance() -> None: assert issubclass(MyMultiSubDoc, MySubDoc) assert issubclass(MyMultiSubDoc, MyDoc2) assert issubclass(MyMultiSubDoc, Document) assert hasattr(MyMultiSubDoc, "_doc_type") assert hasattr(MyMultiSubDoc, "_index") assert { "properties": { "created_at": {"type": "date"}, "name": {"type": "keyword"}, "title": {"type": "keyword"}, "inner": {"type": "object", "properties": {"old_field": {"type": "text"}}}, "extra": {"type": "long"}, } } == MyMultiSubDoc._doc_type.mapping.to_dict() def test_meta_fields_can_be_set_directly_in_init() -> None: p = object() md = MyDoc(_id=p, title="Hello World!") assert md.meta.id is p @pytest.mark.sync def test_save_no_index(mock_client: Any) -> None: md = MyDoc() with raises(ValidationException): md.save(using="mock") @pytest.mark.sync def test_delete_no_index(mock_client: Any) -> None: md = MyDoc() with raises(ValidationException): md.delete(using="mock") @pytest.mark.sync def test_update_no_fields() -> None: md = MyDoc() with raises(IllegalOperation): md.update() def test_search_with_custom_alias_and_index() -> None: search_object = MyDoc.search( using="staging", index=["custom_index1", "custom_index2"] ) assert search_object._using == "staging" assert search_object._index == ["custom_index1", "custom_index2"] def test_from_es_respects_underscored_non_meta_fields() -> None: doc = { "_index": "test-index", "_id": "elasticsearch", "_score": 12.0, "fields": {"hello": "world", "_routing": "es", "_tags": ["search"]}, "_source": { "city": "Amsterdam", "name": "Elasticsearch", "_tagline": "You know, for search", }, } class Company(Document): class Index: name = "test-company" c = Company.from_es(doc) assert c.meta.fields._tags == ["search"] assert c.meta.fields._routing == "es" assert c._tagline == "You know, for search" def test_nested_and_object_inner_doc() -> None: class MySubDocWithNested(MyDoc): nested_inner = field.Nested(MyInner) props = 
MySubDocWithNested._doc_type.mapping.to_dict()["properties"] assert props == { "created_at": {"type": "date"}, "inner": {"properties": {"old_field": {"type": "text"}}, "type": "object"}, "name": {"type": "text"}, "nested_inner": { "properties": {"old_field": {"type": "text"}}, "type": "nested", }, "title": {"type": "keyword"}, } def test_doc_with_type_hints() -> None: class TypedInnerDoc(InnerDoc): st: M[str] dt: M[Optional[datetime]] li: M[List[int]] class TypedDoc(Document): st: str dt: Optional[datetime] li: List[int] ob: TypedInnerDoc ns: List[TypedInnerDoc] ip: Optional[str] = field.Ip() k1: str = field.Keyword(required=True) k2: M[str] = field.Keyword() k3: str = mapped_field(field.Keyword(), default="foo") k4: M[Optional[str]] = mapped_field(field.Keyword()) # type: ignore[misc] s1: Secret = SecretField() s2: M[Secret] = SecretField() s3: Secret = mapped_field(SecretField()) # type: ignore[misc] s4: M[Optional[Secret]] = mapped_field( SecretField(), default_factory=lambda: "foo" ) i1: ClassVar i2: ClassVar[int] props = TypedDoc._doc_type.mapping.to_dict()["properties"] assert props == { "st": {"type": "text"}, "dt": {"type": "date"}, "li": {"type": "integer"}, "ob": { "type": "object", "properties": { "st": {"type": "text"}, "dt": {"type": "date"}, "li": {"type": "integer"}, }, }, "ns": { "type": "nested", "properties": { "st": {"type": "text"}, "dt": {"type": "date"}, "li": {"type": "integer"}, }, }, "ip": {"type": "ip"}, "k1": {"type": "keyword"}, "k2": {"type": "keyword"}, "k3": {"type": "keyword"}, "k4": {"type": "keyword"}, "s1": {"type": "text"}, "s2": {"type": "text"}, "s3": {"type": "text"}, "s4": {"type": "text"}, } TypedDoc.i1 = "foo" TypedDoc.i2 = 123 doc = TypedDoc() assert doc.k3 == "foo" assert doc.s4 == "foo" with raises(ValidationException) as exc_info: doc.full_clean() assert set(exc_info.value.args[0].keys()) == { "st", "k1", "k2", "ob", "s1", "s2", "s3", } assert TypedDoc.i1 == "foo" assert TypedDoc.i2 == 123 doc.st = "s" doc.li = [1, 2, 3] doc.k1 = "k1" doc.k2 = "k2" doc.ob.st = "s" doc.ob.li = [1] doc.s1 = "s1" doc.s2 = "s2" doc.s3 = "s3" doc.full_clean() doc.ob = TypedInnerDoc(li=[1]) with raises(ValidationException) as exc_info: doc.full_clean() assert set(exc_info.value.args[0].keys()) == {"ob"} assert set(exc_info.value.args[0]["ob"][0].args[0].keys()) == {"st"} doc.ob.st = "s" doc.ns.append(TypedInnerDoc(li=[1, 2])) with raises(ValidationException) as exc_info: doc.full_clean() doc.ns[0].st = "s" doc.full_clean() doc.ip = "1.2.3.4" n = datetime.now() doc.dt = n assert doc.to_dict() == { "st": "s", "li": [1, 2, 3], "dt": n, "ob": { "st": "s", "li": [1], }, "ns": [ { "st": "s", "li": [1, 2], } ], "ip": "1.2.3.4", "k1": "k1", "k2": "k2", "k3": "foo", "s1": "s1", "s2": "s2", "s3": "s3", "s4": "foo", } s = TypedDoc.search().sort(TypedDoc.st, -TypedDoc.dt, +TypedDoc.ob.st) s.aggs.bucket("terms_agg", "terms", field=TypedDoc.k1) assert s.to_dict() == { "aggs": {"terms_agg": {"terms": {"field": "k1"}}}, "sort": ["st", {"dt": {"order": "desc"}}, "ob.st"], } @pytest.mark.skipif(sys.version_info < (3, 10), reason="requires Python 3.10") def test_doc_with_pipe_type_hints() -> None: with pytest.raises(TypeError): class BadlyTypedDoc(Document): s: str f: str | int | None # type: ignore[syntax] class TypedDoc(Document): s: str f1: str | None # type: ignore[syntax] f2: M[int | None] # type: ignore[syntax] f3: M[datetime | None] # type: ignore[syntax] props = TypedDoc._doc_type.mapping.to_dict()["properties"] assert props == { "s": {"type": "text"}, "f1": {"type": "text"}, 
"f2": {"type": "integer"}, "f3": {"type": "date"}, } doc = TypedDoc() with raises(ValidationException) as exc_info: doc.full_clean() assert set(exc_info.value.args[0].keys()) == {"s"} doc.s = "s" doc.full_clean() def test_instrumented_field() -> None: class Child(InnerDoc): st: M[str] class Doc(Document): st: str ob: Child ns: List[Child] doc = Doc( st="foo", ob=Child(st="bar"), ns=[ Child(st="baz"), Child(st="qux"), ], ) assert type(doc.st) is str assert doc.st == "foo" assert type(doc.ob) is Child assert doc.ob.st == "bar" assert type(doc.ns) is utils.AttrList assert doc.ns[0].st == "baz" assert doc.ns[1].st == "qux" assert type(doc.ns[0]) is Child assert type(doc.ns[1]) is Child assert type(Doc.st) is InstrumentedField assert str(Doc.st) == "st" assert +Doc.st == "st" assert -Doc.st == "-st" assert Doc.st.to_dict() == {"type": "text"} with raises(AttributeError): Doc.st.something assert type(Doc.ob) is InstrumentedField assert str(Doc.ob) == "ob" assert str(Doc.ob.st) == "ob.st" assert +Doc.ob.st == "ob.st" assert -Doc.ob.st == "-ob.st" assert Doc.ob.st.to_dict() == {"type": "text"} with raises(AttributeError): Doc.ob.something with raises(AttributeError): Doc.ob.st.something assert type(Doc.ns) is InstrumentedField assert str(Doc.ns) == "ns" assert str(Doc.ns.st) == "ns.st" assert +Doc.ns.st == "ns.st" assert -Doc.ns.st == "-ns.st" assert Doc.ns.st.to_dict() == {"type": "text"} with raises(AttributeError): Doc.ns.something with raises(AttributeError): Doc.ns.st.something python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/test_faceted_search.py000066400000000000000000000146471506101734100310230ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from datetime import datetime import pytest from elasticsearch.dsl.faceted_search import ( DateHistogramFacet, FacetedSearch, TermsFacet, ) class BlogSearch(FacetedSearch): doc_types = ["user", "post"] fields = [ "title^5", "body", ] facets = { "category": TermsFacet(field="category.raw"), "tags": TermsFacet(field="tags"), } def test_query_is_created_properly() -> None: bs = BlogSearch("python search") s = bs.build_search() assert s._doc_type == ["user", "post"] assert { "aggs": { "_filter_tags": { "filter": {"match_all": {}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"match_all": {}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "highlight": {"fields": {"body": {}, "title": {}}}, } == s.to_dict() def test_query_is_created_properly_with_sort_tuple() -> None: bs = BlogSearch("python search", sort=("category", "-title")) s = bs.build_search() assert s._doc_type == ["user", "post"] assert { "aggs": { "_filter_tags": { "filter": {"match_all": {}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"match_all": {}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "highlight": {"fields": {"body": {}, "title": {}}}, "sort": ["category", {"title": {"order": "desc"}}], } == s.to_dict() def test_filter_is_applied_to_search_but_not_relevant_facet() -> None: bs = BlogSearch("python search", filters={"category": "elastic"}) s = bs.build_search() assert { "aggs": { "_filter_tags": { "filter": {"terms": {"category.raw": ["elastic"]}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"match_all": {}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "post_filter": {"terms": {"category.raw": ["elastic"]}}, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "highlight": {"fields": {"body": {}, "title": {}}}, } == s.to_dict() def test_filters_are_applied_to_search_and_relevant_facets() -> None: bs = BlogSearch( "python search", filters={"category": "elastic", "tags": ["python", "django"]} ) s = bs.build_search() d = s.to_dict() # we need to test post_filter without relying on order f = d["post_filter"]["bool"].pop("must") assert len(f) == 2 assert {"terms": {"category.raw": ["elastic"]}} in f assert {"terms": {"tags": ["python", "django"]}} in f assert { "aggs": { "_filter_tags": { "filter": {"terms": {"category.raw": ["elastic"]}}, "aggs": {"tags": {"terms": {"field": "tags"}}}, }, "_filter_category": { "filter": {"terms": {"tags": ["python", "django"]}}, "aggs": {"category": {"terms": {"field": "category.raw"}}}, }, }, "query": { "multi_match": {"fields": ["title^5", "body"], "query": "python search"} }, "post_filter": {"bool": {}}, "highlight": {"fields": {"body": {}, "title": {}}}, } == d def test_date_histogram_facet_with_1970_01_01_date() -> None: dhf = DateHistogramFacet() assert dhf.get_value({"key": None}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] assert dhf.get_value({"key": 0}) == datetime(1970, 1, 1, 0, 0) # type: ignore[arg-type] @pytest.mark.parametrize( ["interval_type", "interval"], [ ("interval", "year"), ("calendar_interval", "year"), ("interval", "month"), ("calendar_interval", "month"), ("interval", "week"), ("calendar_interval", "week"), ("interval", "day"), ("calendar_interval", "day"), ("fixed_interval",
"day"), ("interval", "hour"), ("fixed_interval", "hour"), ("interval", "1Y"), ("calendar_interval", "1Y"), ("interval", "1M"), ("calendar_interval", "1M"), ("interval", "1w"), ("calendar_interval", "1w"), ("interval", "1d"), ("calendar_interval", "1d"), ("fixed_interval", "1d"), ("interval", "1h"), ("fixed_interval", "1h"), ], ) def test_date_histogram_interval_types(interval_type: str, interval: str) -> None: dhf = DateHistogramFacet(field="@timestamp", **{interval_type: interval}) assert dhf.get_aggregation().to_dict() == { "date_histogram": { "field": "@timestamp", interval_type: interval, "min_doc_count": 0, } } dhf.get_value_filter(datetime.now()) def test_date_histogram_no_interval_keyerror() -> None: dhf = DateHistogramFacet(field="@timestamp") with pytest.raises(KeyError) as e: dhf.get_value_filter(datetime.now()) assert str(e.value) == "'interval'" def test_params_added_to_search() -> None: bs = BlogSearch("python search") assert bs._s._params == {} bs.params(routing="42") assert bs._s._params == {"routing": "42"} python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/test_index.py000066400000000000000000000127421506101734100272040ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import string from random import choice from typing import Any, Dict import pytest from pytest import raises from elasticsearch.dsl import ( Date, Document, Index, IndexTemplate, Text, analyzer, ) class Post(Document): title = Text() published_from = Date() def test_multiple_doc_types_will_combine_mappings() -> None: class User(Document): username = Text() i = Index("i") i.document(Post) i.document(User) assert { "mappings": { "properties": { "title": {"type": "text"}, "username": {"type": "text"}, "published_from": {"type": "date"}, } } } == i.to_dict() def test_search_is_limited_to_index_name() -> None: i = Index("my-index") s = i.search() assert s._index == ["my-index"] def test_cloned_index_has_copied_settings_and_using() -> None: client = object() i = Index("my-index", using=client) # type: ignore[arg-type] i.settings(number_of_shards=1) i2 = i.clone("my-other-index") assert "my-other-index" == i2._name assert client is i2._using assert i._settings == i2._settings assert i._settings is not i2._settings def test_cloned_index_has_analysis_attribute() -> None: """ Regression test for Issue #582 in which `AsyncIndex.clone()` was not copying over the `_analysis` attribute. 
""" client = object() i = Index("my-index", using=client) # type: ignore[arg-type] random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) random_analyzer = analyzer( random_analyzer_name, tokenizer="standard", filter="standard" ) i.analyzer(random_analyzer) i2 = i.clone("my-clone-index") assert i.to_dict()["settings"]["analysis"] == i2.to_dict()["settings"]["analysis"] def test_settings_are_saved() -> None: i = Index("i") i.settings(number_of_replicas=0) i.settings(number_of_shards=1) assert {"settings": {"number_of_shards": 1, "number_of_replicas": 0}} == i.to_dict() def test_registered_doc_type_included_in_to_dict() -> None: i = Index("i", using="alias") i.document(Post) assert { "mappings": { "properties": { "title": {"type": "text"}, "published_from": {"type": "date"}, } } } == i.to_dict() def test_registered_doc_type_included_in_search() -> None: i = Index("i", using="alias") i.document(Post) s = i.search() assert s._doc_type == [Post] def test_aliases_add_to_object() -> None: random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) alias_dict: Dict[str, Any] = {random_alias: {}} index = Index("i", using="alias") index.aliases(**alias_dict) assert index._aliases == alias_dict def test_aliases_returned_from_to_dict() -> None: random_alias = "".join(choice(string.ascii_letters) for _ in range(100)) alias_dict: Dict[str, Any] = {random_alias: {}} index = Index("i", using="alias") index.aliases(**alias_dict) assert index._aliases == index.to_dict()["aliases"] == alias_dict def test_analyzers_added_to_object() -> None: random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) random_analyzer = analyzer( random_analyzer_name, tokenizer="standard", filter="standard" ) index = Index("i", using="alias") index.analyzer(random_analyzer) assert index._analysis["analyzer"][random_analyzer_name] == { "filter": ["standard"], "type": "custom", "tokenizer": "standard", } def test_analyzers_returned_from_to_dict() -> None: random_analyzer_name = "".join(choice(string.ascii_letters) for _ in range(100)) random_analyzer = analyzer( random_analyzer_name, tokenizer="standard", filter="standard" ) index = Index("i", using="alias") index.analyzer(random_analyzer) assert index.to_dict()["settings"]["analysis"]["analyzer"][ random_analyzer_name ] == {"filter": ["standard"], "type": "custom", "tokenizer": "standard"} def test_conflicting_analyzer_raises_error() -> None: i = Index("i") i.analyzer("my_analyzer", tokenizer="whitespace", filter=["lowercase", "stop"]) with raises(ValueError): i.analyzer("my_analyzer", tokenizer="keyword", filter=["lowercase", "stop"]) def test_index_template_can_have_order() -> None: i = Index("i-*") it = i.as_template("i", order=2) assert {"index_patterns": ["i-*"], "order": 2} == it.to_dict() @pytest.mark.sync def test_index_template_save_result(mock_client: Any) -> None: it = IndexTemplate("test-template", "test-*") assert it.save(using="mock") == mock_client.indices.put_template() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/test_mapping.py000066400000000000000000000163641506101734100275340ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json from elasticsearch.dsl import Keyword, Mapping, Nested, Text, analysis def test_mapping_can_has_fields() -> None: m = Mapping() m.field("name", "text").field("tags", "keyword") assert { "properties": {"name": {"type": "text"}, "tags": {"type": "keyword"}} } == m.to_dict() def test_mapping_update_is_recursive() -> None: m1 = Mapping() m1.field("title", "text") m1.field("author", "object") m1.field("author", "object", properties={"name": {"type": "text"}}) m1.meta("_all", enabled=False) m1.meta("dynamic", False) m2 = Mapping() m2.field("published_from", "date") m2.field("author", "object", properties={"email": {"type": "text"}}) m2.field("title", "text") m2.field("lang", "keyword") m2.meta("_analyzer", path="lang") m1.update(m2, update_only=True) assert { "_all": {"enabled": False}, "_analyzer": {"path": "lang"}, "dynamic": False, "properties": { "published_from": {"type": "date"}, "title": {"type": "text"}, "lang": {"type": "keyword"}, "author": { "type": "object", "properties": {"name": {"type": "text"}, "email": {"type": "text"}}, }, }, } == m1.to_dict() def test_properties_can_iterate_over_all_the_fields() -> None: m = Mapping() m.field("f1", "text", test_attr="f1", fields={"f2": Keyword(test_attr="f2")}) m.field("f3", Nested(test_attr="f3", properties={"f4": Text(test_attr="f4")})) assert {"f1", "f2", "f3", "f4"} == { f.test_attr for f in m.properties._collect_fields() } def test_mapping_can_collect_all_analyzers_and_normalizers() -> None: a1 = analysis.analyzer( "my_analyzer1", tokenizer="keyword", filter=[ "lowercase", analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), ], ) a2 = analysis.analyzer("english") a3 = analysis.analyzer("unknown_custom") a4 = analysis.analyzer( "my_analyzer2", tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], ) a5 = analysis.analyzer("my_analyzer3", tokenizer="keyword") n1 = analysis.normalizer("my_normalizer1", filter=["lowercase"]) n2 = analysis.normalizer( "my_normalizer2", filter=[ "my_filter1", "my_filter2", analysis.token_filter("my_filter3", "stop", stopwords=["e", "f"]), ], ) n3 = analysis.normalizer("unknown_custom") m = Mapping() m.field( "title", "text", analyzer=a1, fields={"english": Text(analyzer=a2), "unknown": Keyword(search_analyzer=a3)}, ) m.field("comments", Nested(properties={"author": Text(analyzer=a4)})) m.field("normalized_title", "keyword", normalizer=n1) m.field("normalized_comment", "keyword", normalizer=n2) m.field("unknown", "keyword", normalizer=n3) m.meta("_all", analyzer=a5) assert { "analyzer": { "my_analyzer1": { "filter": ["lowercase", "my_filter1"], "tokenizer": "keyword", "type": "custom", }, "my_analyzer2": { "filter": ["my_filter2"], "tokenizer": "trigram", "type": "custom", }, "my_analyzer3": {"tokenizer": "keyword", "type": "custom"}, }, "normalizer": { "my_normalizer1": {"filter": ["lowercase"], "type": "custom"}, "my_normalizer2": { "filter": ["my_filter1", "my_filter2", "my_filter3"], "type": "custom", }, }, "filter": { "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, "my_filter2": 
{"stopwords": ["c", "d"], "type": "stop"}, "my_filter3": {"stopwords": ["e", "f"], "type": "stop"}, }, "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}}, } == m._collect_analysis() assert json.loads(json.dumps(m.to_dict())) == m.to_dict() def test_mapping_can_collect_multiple_analyzers() -> None: a1 = analysis.analyzer( "my_analyzer1", tokenizer="keyword", filter=[ "lowercase", analysis.token_filter("my_filter1", "stop", stopwords=["a", "b"]), ], ) a2 = analysis.analyzer( "my_analyzer2", tokenizer=analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3), filter=[analysis.token_filter("my_filter2", "stop", stopwords=["c", "d"])], ) m = Mapping() m.field("title", "text", analyzer=a1, search_analyzer=a2) m.field( "text", "text", analyzer=a1, fields={ "english": Text(analyzer=a1), "unknown": Keyword(analyzer=a1, search_analyzer=a2), }, ) assert { "analyzer": { "my_analyzer1": { "filter": ["lowercase", "my_filter1"], "tokenizer": "keyword", "type": "custom", }, "my_analyzer2": { "filter": ["my_filter2"], "tokenizer": "trigram", "type": "custom", }, }, "filter": { "my_filter1": {"stopwords": ["a", "b"], "type": "stop"}, "my_filter2": {"stopwords": ["c", "d"], "type": "stop"}, }, "tokenizer": {"trigram": {"max_gram": 3, "min_gram": 3, "type": "nGram"}}, } == m._collect_analysis() def test_even_non_custom_analyzers_can_have_params() -> None: a1 = analysis.analyzer("whitespace", type="pattern", pattern=r"\\s+") m = Mapping() m.field("title", "text", analyzer=a1) assert { "analyzer": {"whitespace": {"type": "pattern", "pattern": r"\\s+"}} } == m._collect_analysis() def test_resolve_field_can_resolve_multifields() -> None: m = Mapping() m.field("title", "text", fields={"keyword": Keyword()}) assert isinstance(m.resolve_field("title.keyword"), Keyword) def test_resolve_nested() -> None: m = Mapping() m.field("n1", "nested", properties={"n2": Nested(properties={"k1": Keyword()})}) m.field("k2", "keyword") nested, field = m.resolve_nested("n1.n2.k1") assert nested == ["n1", "n1.n2"] assert isinstance(field, Keyword) nested, field = m.resolve_nested("k2") assert nested == [] assert isinstance(field, Keyword) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/test_search.py000066400000000000000000000574141506101734100273470ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from copy import deepcopy from typing import Any import pytest from pytest import raises from elasticsearch.dsl import ( Document, EmptySearch, Q, Search, query, types, wrappers, ) from elasticsearch.dsl.exceptions import IllegalOperation def test_expand__to_dot_is_respected() -> None: s = Search().query("match", a__b=42, _expand__to_dot=False) assert {"query": {"match": {"a__b": 42}}} == s.to_dict() @pytest.mark.sync def test_execute_uses_cache() -> None: s = Search() r = object() s._response = r # type: ignore[assignment] assert r is s.execute() @pytest.mark.sync def test_cache_can_be_ignored(mock_client: Any) -> None: s = Search(using="mock") r = object() s._response = r # type: ignore[assignment] s.execute(ignore_cache=True) mock_client.search.assert_called_once_with(index=None, body={}) @pytest.mark.sync def test_iter_iterates_over_hits() -> None: s = Search() s._response = [1, 2, 3] # type: ignore[assignment] assert [1, 2, 3] == [hit for hit in s] def test_cache_isnt_cloned() -> None: s = Search() s._response = object() # type: ignore[assignment] assert not hasattr(s._clone(), "_response") def test_search_starts_with_no_query() -> None: s = Search() assert s.query._proxied is None def test_search_query_combines_query() -> None: s = Search() s2 = s.query("match", f=42) assert s2.query._proxied == query.Match(f=42) assert s.query._proxied is None s3 = s2.query("match", f=43) assert s2.query._proxied == query.Match(f=42) assert s3.query._proxied == query.Bool(must=[query.Match(f=42), query.Match(f=43)]) def test_query_can_be_assigned_to() -> None: s = Search() q = Q("match", title="python") s.query = q # type: ignore assert s.query._proxied is q def test_query_can_be_wrapped() -> None: s = Search().query("match", title="python") s.query = Q("function_score", query=s.query, field_value_factor={"field": "rating"}) # type: ignore assert { "query": { "function_score": { "functions": [{"field_value_factor": {"field": "rating"}}], "query": {"match": {"title": "python"}}, } } } == s.to_dict() def test_using() -> None: o = object() o2 = object() s = Search(using=o) assert s._using is o s2 = s.using(o2) # type: ignore[arg-type] assert s._using is o assert s2._using is o2 def test_methods_are_proxied_to_the_query() -> None: s = Search().query("match_all") assert s.query.to_dict() == {"match_all": {}} def test_query_always_returns_search() -> None: s = Search() assert isinstance(s.query("match", f=42), Search) def test_source_copied_on_clone() -> None: s = Search().source(False) assert s._clone()._source == s._source assert s._clone()._source is False s2 = Search().source([]) assert s2._clone()._source == s2._source assert s2._source == [] s3 = Search().source(["some", "fields"]) assert s3._clone()._source == s3._source assert s3._clone()._source == ["some", "fields"] def test_copy_clones() -> None: from copy import copy s1 = Search().source(["some", "fields"]) s2 = copy(s1) assert s1 == s2 assert s1 is not s2 def test_aggs_allow_two_metric() -> None: s = Search() s.aggs.metric("a", "max", field="a").metric("b", "max", field="b") assert s.to_dict() == { "aggs": {"a": {"max": {"field": "a"}}, "b": {"max": {"field": "b"}}} } def test_aggs_get_copied_on_change() -> None: s = Search().query("match_all") s.aggs.bucket("per_tag", "terms", field="f").metric( "max_score", "max", field="score" ) s2 = s.query("match_all") s2.aggs.bucket("per_month", "date_histogram", field="date", interval="month") s3 = s2.query("match_all") s3.aggs["per_month"].metric("max_score", "max", field="score") s4 = s3._clone() 
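    # s4 is a verbatim clone of s3 at this point; the metric added below
    # must appear only in s4's aggregation tree (checked by the final
    # `d == s4.to_dict()` assertion)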
s4.aggs.metric("max_score", "max", field="score") d: Any = { "query": {"match_all": {}}, "aggs": { "per_tag": { "terms": {"field": "f"}, "aggs": {"max_score": {"max": {"field": "score"}}}, } }, } assert d == s.to_dict() d["aggs"]["per_month"] = {"date_histogram": {"field": "date", "interval": "month"}} assert d == s2.to_dict() d["aggs"]["per_month"]["aggs"] = {"max_score": {"max": {"field": "score"}}} assert d == s3.to_dict() d["aggs"]["max_score"] = {"max": {"field": "score"}} assert d == s4.to_dict() def test_search_index() -> None: s = Search(index="i") assert s._index == ["i"] s = s.index("i2") assert s._index == ["i", "i2"] s = s.index("i3") assert s._index == ["i", "i2", "i3"] s = s.index() assert s._index is None s = Search(index=("i", "i2")) assert s._index == ["i", "i2"] s = Search(index=["i", "i2"]) assert s._index == ["i", "i2"] s = Search() s = s.index("i", "i2") assert s._index == ["i", "i2"] s2 = s.index("i3") assert s._index == ["i", "i2"] assert s2._index == ["i", "i2", "i3"] s = Search() s = s.index(["i", "i2"], "i3") assert s._index == ["i", "i2", "i3"] s2 = s.index("i4") assert s._index == ["i", "i2", "i3"] assert s2._index == ["i", "i2", "i3", "i4"] s2 = s.index(["i4"]) assert s2._index == ["i", "i2", "i3", "i4"] s2 = s.index(("i4", "i5")) assert s2._index == ["i", "i2", "i3", "i4", "i5"] def test_doc_type_document_class() -> None: class MyDocument(Document): pass s = Search(doc_type=MyDocument) assert s._doc_type == [MyDocument] assert s._doc_type_map == {} s = Search().doc_type(MyDocument) assert s._doc_type == [MyDocument] assert s._doc_type_map == {} def test_knn() -> None: s = Search() with raises(TypeError): s.knn() # type: ignore[call-arg] with raises(TypeError): s.knn("field") # type: ignore[call-arg] with raises(TypeError): s.knn("field", 5) # type: ignore[call-arg] with raises(ValueError): s.knn("field", 5, 100) with raises(ValueError): s.knn("field", 5, 100, query_vector=[1, 2, 3], query_vector_builder={}) s = s.knn("field", 5, 100, query_vector=[1, 2, 3]) assert { "knn": { "field": "field", "k": 5, "num_candidates": 100, "query_vector": [1, 2, 3], } } == s.to_dict() s = s.knn( k=4, num_candidates=40, boost=0.8, field="name", query_vector_builder={ "text_embedding": {"model_id": "foo", "model_text": "search text"} }, inner_hits={"size": 1}, ) assert { "knn": [ { "field": "field", "k": 5, "num_candidates": 100, "query_vector": [1, 2, 3], }, { "field": "name", "k": 4, "num_candidates": 40, "query_vector_builder": { "text_embedding": {"model_id": "foo", "model_text": "search text"} }, "boost": 0.8, "inner_hits": {"size": 1}, }, ] } == s.to_dict() def test_rank() -> None: s = Search() s.rank(rrf=False) assert {} == s.to_dict() s = s.rank(rrf=True) assert {"rank": {"rrf": {}}} == s.to_dict() s = s.rank(rrf={"window_size": 50, "rank_constant": 20}) assert {"rank": {"rrf": {"window_size": 50, "rank_constant": 20}}} == s.to_dict() def test_sort() -> None: s = Search() s = s.sort("fielda", "-fieldb") assert ["fielda", {"fieldb": {"order": "desc"}}] == s._sort assert {"sort": ["fielda", {"fieldb": {"order": "desc"}}]} == s.to_dict() s = s.sort() assert [] == s._sort assert Search().to_dict() == s.to_dict() def test_sort_by_score() -> None: s = Search() s = s.sort("_score") assert {"sort": ["_score"]} == s.to_dict() s = Search() with raises(IllegalOperation): s.sort("-_score") def test_collapse() -> None: s = Search() inner_hits = {"name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}]} s = s.collapse("user.id", inner_hits=inner_hits, 
max_concurrent_group_searches=4) assert { "field": "user.id", "inner_hits": { "name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}], }, "max_concurrent_group_searches": 4, } == s._collapse assert { "collapse": { "field": "user.id", "inner_hits": { "name": "most_recent", "size": 5, "sort": [{"@timestamp": "desc"}], }, "max_concurrent_group_searches": 4, } } == s.to_dict() s = s.collapse() assert {} == s._collapse assert Search().to_dict() == s.to_dict() def test_slice() -> None: s = Search() assert {"from": 3, "size": 7} == s[3:10].to_dict() assert {"size": 5} == s[:5].to_dict() assert {"from": 3} == s[3:].to_dict() assert {"from": 0, "size": 0} == s[0:0].to_dict() assert {"from": 20, "size": 0} == s[20:0].to_dict() assert {"from": 10, "size": 5} == s[10:][:5].to_dict() assert {"from": 10, "size": 0} == s[:5][10:].to_dict() assert {"size": 10} == s[:10][:40].to_dict() assert {"size": 10} == s[:40][:10].to_dict() assert {"size": 40} == s[:40][:80].to_dict() assert {"from": 12, "size": 0} == s[:5][10:][2:].to_dict() assert {"from": 15, "size": 0} == s[10:][:5][5:].to_dict() assert {} == s[:].to_dict() with raises(ValueError): s[-1:] with raises(ValueError): s[4:-1] with raises(ValueError): s[-3:-2] def test_index() -> None: s = Search() assert {"from": 3, "size": 1} == s[3].to_dict() assert {"from": 3, "size": 1} == s[3][0].to_dict() assert {"from": 8, "size": 0} == s[3][5].to_dict() assert {"from": 4, "size": 1} == s[3:10][1].to_dict() with raises(ValueError): s[-3] def test_search_to_dict() -> None: s = Search() assert {} == s.to_dict() s = s.query("match", f=42) assert {"query": {"match": {"f": 42}}} == s.to_dict() assert {"query": {"match": {"f": 42}}, "size": 10} == s.to_dict(size=10) s.aggs.bucket("per_tag", "terms", field="f").metric( "max_score", "max", field="score" ) d = { "aggs": { "per_tag": { "terms": {"field": "f"}, "aggs": {"max_score": {"max": {"field": "score"}}}, } }, "query": {"match": {"f": 42}}, } assert d == s.to_dict() s = Search(extra={"size": 5}) assert {"size": 5} == s.to_dict() s = s.extra(from_=42) assert {"size": 5, "from": 42} == s.to_dict() def test_complex_example() -> None: s = Search() s = ( s.query("match", title="python") .query(~Q("match", title="ruby")) .filter(Q("term", category="meetup") | Q("term", category="conference")) .collapse("user_id") .post_filter("terms", tags=["prague", "czech"]) .script_fields(more_attendees="doc['attendees'].value + 42") ) s.aggs.bucket("per_country", "terms", field="country").metric( "avg_attendees", "avg", field="attendees" ) s.query.minimum_should_match = 2 s = s.highlight_options(order="score").highlight("title", "body", fragment_size=50) assert { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } }, "post_filter": {"terms": {"tags": ["prague", "czech"]}}, "aggs": { "per_country": { "terms": {"field": "country"}, "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, } }, "collapse": {"field": "user_id"}, "highlight": { "order": "score", "fields": {"title": {"fragment_size": 50}, "body": {"fragment_size": 50}}, }, "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, } == s.to_dict() def test_reverse() -> None: d = { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [ { "bool": { 
"must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } } ], } }, "post_filter": {"bool": {"must": [{"terms": {"tags": ["prague", "czech"]}}]}}, "aggs": { "per_country": { "terms": {"field": "country"}, "aggs": {"avg_attendees": {"avg": {"field": "attendees"}}}, } }, "sort": ["title", {"category": {"order": "desc"}}, "_score"], "size": 5, "highlight": {"order": "score", "fields": {"title": {"fragment_size": 50}}}, "suggest": { "my-title-suggestions-1": { "text": "devloping distibutd saerch engies", "term": {"size": 3, "field": "title"}, } }, "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, } d2 = deepcopy(d) s = Search.from_dict(d) # make sure we haven't modified anything in place assert d == d2 assert {"size": 5} == s._extra assert d == s.to_dict() def test_code_generated_classes() -> None: s = Search() s = ( s.query(query.Match("title", types.MatchQuery(query="python"))) .query(~query.Match("title", types.MatchQuery(query="ruby"))) .query( query.Knn( field="title", query_vector=[1.0, 2.0, 3.0], num_candidates=10, k=3, filter=query.Range("year", wrappers.Range(gt="2004")), ) ) .filter( query.Term("category", types.TermQuery(value="meetup")) | query.Term("category", types.TermQuery(value="conference")) ) .collapse("user_id") .post_filter(query.Terms(tags=["prague", "czech"])) .script_fields(more_attendees="doc['attendees'].value + 42") ) assert { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": {"value": "meetup"}}}, {"term": {"category": {"value": "conference"}}}, ] } } ], "must": [ {"match": {"title": {"query": "python"}}}, { "knn": { "field": "title", "filter": [ { "range": { "year": { "gt": "2004", }, }, }, ], "k": 3, "num_candidates": 10, "query_vector": [ 1.0, 2.0, 3.0, ], }, }, ], "must_not": [{"match": {"title": {"query": "ruby"}}}], } }, "post_filter": {"terms": {"tags": ["prague", "czech"]}}, "collapse": {"field": "user_id"}, "script_fields": {"more_attendees": {"script": "doc['attendees'].value + 42"}}, } == s.to_dict() def test_from_dict_doesnt_need_query() -> None: s = Search.from_dict({"size": 5}) assert {"size": 5} == s.to_dict() @pytest.mark.sync def test_params_being_passed_to_search(mock_client: Any) -> None: s = Search(using="mock") s = s.params(routing="42") s.execute() mock_client.search.assert_called_once_with(index=None, body={}, routing="42") def test_source() -> None: assert {} == Search().source().to_dict() assert { "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]} } == Search().source(includes=["foo.bar.*"], excludes=("foo.one",)).to_dict() assert {"_source": False} == Search().source(False).to_dict() assert {"_source": ["f1", "f2"]} == Search().source( includes=["foo.bar.*"], excludes=["foo.one"] ).source(["f1", "f2"]).to_dict() def test_source_on_clone() -> None: assert { "_source": {"includes": ["foo.bar.*"], "excludes": ["foo.one"]}, "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, } == Search().source(includes=["foo.bar.*"]).source(excludes=["foo.one"]).filter( "term", title="python" ).to_dict() assert { "_source": False, "query": {"bool": {"filter": [{"term": {"title": "python"}}]}}, } == Search().source(False).filter("term", title="python").to_dict() def test_source_on_clear() -> None: assert ( {} == Search() .source(includes=["foo.bar.*"]) .source(includes=None, excludes=None) .to_dict() ) def test_suggest_accepts_global_text() -> None: s = Search.from_dict( { "suggest": { "text": "the amsterdma meetpu", 
"my-suggest-1": {"term": {"field": "title"}}, "my-suggest-2": {"text": "other", "term": {"field": "body"}}, } } ) assert { "suggest": { "my-suggest-1": { "term": {"field": "title"}, "text": "the amsterdma meetpu", }, "my-suggest-2": {"term": {"field": "body"}, "text": "other"}, } } == s.to_dict() def test_suggest() -> None: s = Search() s = s.suggest("my_suggestion", "pyhton", term={"field": "title"}) assert { "suggest": {"my_suggestion": {"term": {"field": "title"}, "text": "pyhton"}} } == s.to_dict() def test_exclude() -> None: s = Search() s = s.exclude("match", title="python") assert { "query": { "bool": { "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] } } } == s.to_dict() @pytest.mark.sync def test_delete_by_query(mock_client: Any) -> None: s = Search(using="mock", index="i").query("match", lang="java") s.delete() mock_client.delete_by_query.assert_called_once_with( index=["i"], body={"query": {"match": {"lang": "java"}}} ) def test_update_from_dict() -> None: s = Search() s.update_from_dict({"indices_boost": [{"important-documents": 2}]}) s.update_from_dict({"_source": ["id", "name"]}) s.update_from_dict({"collapse": {"field": "user_id"}}) assert { "indices_boost": [{"important-documents": 2}], "_source": ["id", "name"], "collapse": {"field": "user_id"}, } == s.to_dict() def test_rescore_query_to_dict() -> None: s = Search(index="index-name") positive_query = Q( "function_score", query=Q("term", tags="a"), script_score={"script": "_score * 1"}, ) negative_query = Q( "function_score", query=Q("term", tags="b"), script_score={"script": "_score * -100"}, ) s = s.query(positive_query) s = s.extra( rescore={"window_size": 100, "query": {"rescore_query": negative_query}} ) assert s.to_dict() == { "query": { "function_score": { "query": {"term": {"tags": "a"}}, "functions": [{"script_score": {"script": "_score * 1"}}], } }, "rescore": { "window_size": 100, "query": { "rescore_query": { "function_score": { "query": {"term": {"tags": "b"}}, "functions": [{"script_score": {"script": "_score * -100"}}], } } }, }, } assert s.to_dict( rescore={"window_size": 10, "query": {"rescore_query": positive_query}} ) == { "query": { "function_score": { "query": {"term": {"tags": "a"}}, "functions": [{"script_score": {"script": "_score * 1"}}], } }, "rescore": { "window_size": 10, "query": { "rescore_query": { "function_score": { "query": {"term": {"tags": "a"}}, "functions": [{"script_score": {"script": "_score * 1"}}], } } }, }, } @pytest.mark.sync def test_empty_search() -> None: s = EmptySearch(index="index-name") s = s.query("match", lang="java") s.aggs.bucket("versions", "terms", field="version") assert s.count() == 0 assert [hit for hit in s] == [] assert [hit for hit in s.scan()] == [] s.delete() # should not error def test_suggest_completion() -> None: s = Search() s = s.suggest("my_suggestion", "pyhton", completion={"field": "title"}) assert { "suggest": { "my_suggestion": {"completion": {"field": "title"}, "prefix": "pyhton"} } } == s.to_dict() def test_suggest_regex_query() -> None: s = Search() s = s.suggest("my_suggestion", regex="py[thon|py]", completion={"field": "title"}) assert { "suggest": { "my_suggestion": {"completion": {"field": "title"}, "regex": "py[thon|py]"} } } == s.to_dict() def test_suggest_must_pass_text_or_regex() -> None: s = Search() with raises(ValueError): s.suggest("my_suggestion") def test_suggest_can_only_pass_text_or_regex() -> None: s = Search() with raises(ValueError): s.suggest("my_suggestion", text="python", regex="py[hton|py]") def 
test_suggest_regex_must_be_with_completion() -> None: s = Search() with raises(ValueError): s.suggest("my_suggestion", regex="py[thon|py]") python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/_sync/test_update_by_query.py000066400000000000000000000125731506101734100313000ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from copy import deepcopy from typing import Any import pytest from elasticsearch.dsl import Q, UpdateByQuery from elasticsearch.dsl.response import UpdateByQueryResponse from elasticsearch.dsl.search_base import SearchBase def test_ubq_starts_with_no_query() -> None: ubq = UpdateByQuery() assert ubq.query._proxied is None def test_ubq_to_dict() -> None: ubq = UpdateByQuery() assert {} == ubq.to_dict() ubq = ubq.query("match", f=42) assert {"query": {"match": {"f": 42}}} == ubq.to_dict() assert {"query": {"match": {"f": 42}}, "size": 10} == ubq.to_dict(size=10) ubq = UpdateByQuery(extra={"size": 5}) assert {"size": 5} == ubq.to_dict() ubq = UpdateByQuery(extra={"extra_q": Q("term", category="conference")}) assert {"extra_q": {"term": {"category": "conference"}}} == ubq.to_dict() def test_complex_example() -> None: ubq = UpdateByQuery() ubq = ( ubq.query("match", title="python") .query(~Q("match", title="ruby")) .filter(Q("term", category="meetup") | Q("term", category="conference")) .script( source="ctx._source.likes += params.f", lang="painless", params={"f": 3} ) ) ubq.query.minimum_should_match = 2 assert { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } }, "script": { "source": "ctx._source.likes += params.f", "lang": "painless", "params": {"f": 3}, }, } == ubq.to_dict() def test_exclude() -> None: ubq = UpdateByQuery() ubq = ubq.exclude("match", title="python") assert { "query": { "bool": { "filter": [{"bool": {"must_not": [{"match": {"title": "python"}}]}}] } } } == ubq.to_dict() def test_reverse() -> None: d = { "query": { "bool": { "filter": [ { "bool": { "should": [ {"term": {"category": "meetup"}}, {"term": {"category": "conference"}}, ] } } ], "must": [ { "bool": { "must": [{"match": {"title": "python"}}], "must_not": [{"match": {"title": "ruby"}}], "minimum_should_match": 2, } } ], } }, "script": { "source": "ctx._source.likes += params.f", "lang": "painless", "params": {"f": 3}, }, } d2 = deepcopy(d) ubq = UpdateByQuery.from_dict(d) assert d == d2 assert d == ubq.to_dict() def test_from_dict_doesnt_need_query() -> None: ubq = UpdateByQuery.from_dict({"script": {"source": "test"}}) assert {"script": {"source": "test"}} == ubq.to_dict() @pytest.mark.sync def test_params_being_passed_to_search(mock_client: Any) -> None: ubq
= UpdateByQuery(using="mock", index="i") ubq = ubq.params(routing="42") ubq.execute() mock_client.update_by_query.assert_called_once_with(index=["i"], routing="42") def test_overwrite_script() -> None: ubq = UpdateByQuery() ubq = ubq.script( source="ctx._source.likes += params.f", lang="painless", params={"f": 3} ) assert { "script": { "source": "ctx._source.likes += params.f", "lang": "painless", "params": {"f": 3}, } } == ubq.to_dict() ubq = ubq.script(source="ctx._source.likes++") assert {"script": {"source": "ctx._source.likes++"}} == ubq.to_dict() def test_update_by_query_response_success() -> None: ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": []}) assert ubqr.success() ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": True, "failures": []}) assert not ubqr.success() ubqr = UpdateByQueryResponse(SearchBase(), {"timed_out": False, "failures": [{}]}) assert not ubqr.success() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/async_sleep.py000066400000000000000000000016761506101734100262340ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import asyncio from typing import Union async def sleep(secs: Union[int, float]) -> None: """Tests can use this function to sleep.""" await asyncio.sleep(secs) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/conftest.py000066400000000000000000000405471506101734100255540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
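# NOTE (editorial addition): this conftest wires up paired sync/async
# fixtures: real clients (`client` / `async_client`), Mock-backed clients
# (`mock_client` / `async_mock_client`) registered under the "mock"
# connection alias, and data fixtures that bulk-load the git history used
# by the integration tests. Unit tests typically take `mock_client` and
# assert on the recorded transport call, e.g. (illustrative test name):
#
#     def test_routing_param(mock_client):
#         Search(using="mock").params(routing="42").execute()
#         mock_client.search.assert_called_once_with(
#             index=None, body={}, routing="42"
#         )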
import asyncio import os import re import time from datetime import datetime from typing import Any, AsyncGenerator, Dict, Generator, Tuple, cast from unittest import SkipTest from unittest.mock import AsyncMock, Mock import pytest_asyncio from elastic_transport import ObjectApiResponse from pytest import fixture, skip from elasticsearch import AsyncElasticsearch, Elasticsearch from elasticsearch.dsl import Search from elasticsearch.dsl.async_connections import add_connection as add_async_connection from elasticsearch.dsl.async_connections import connections as async_connections from elasticsearch.dsl.connections import add_connection, connections from elasticsearch.exceptions import ConnectionError from elasticsearch.helpers import bulk from ..utils import CA_CERTS, wipe_cluster from .test_integration._async import test_document as async_document from .test_integration._sync import test_document as sync_document from .test_integration.test_data import ( DATA, FLAT_DATA, TEST_GIT_DATA, create_flat_git_index, create_git_index, ) def get_test_client( elasticsearch_url, wait: bool = True, **kwargs: Any ) -> Elasticsearch: # construct kwargs from the environment kw: Dict[str, Any] = {"request_timeout": 30} if elasticsearch_url.startswith("https://"): kw["ca_certs"] = CA_CERTS if "PYTHON_CONNECTION_CLASS" in os.environ: kw["node_class"] = os.environ["PYTHON_CONNECTION_CLASS"] kw.update(kwargs) client = Elasticsearch(elasticsearch_url, **kw) # wait for yellow status for tries_left in range(100 if wait else 1, 0, -1): try: client.cluster.health(wait_for_status="yellow") return client except ConnectionError: if wait and tries_left == 1: raise time.sleep(0.1) raise SkipTest("Elasticsearch failed to start.") async def get_async_test_client( elasticsearch_url, wait: bool = True, **kwargs: Any ) -> AsyncElasticsearch: # construct kwargs from the environment kw: Dict[str, Any] = {"request_timeout": 30} if elasticsearch_url.startswith("https://"): kw["ca_certs"] = CA_CERTS kw.update(kwargs) client = AsyncElasticsearch(elasticsearch_url, **kw) # wait for yellow status for tries_left in range(100 if wait else 1, 0, -1): try: await client.cluster.health(wait_for_status="yellow") return client except ConnectionError: if wait and tries_left == 1: raise await asyncio.sleep(0.1) await client.close() raise SkipTest("Elasticsearch failed to start.") def _get_version(version_string: str) -> Tuple[int, ...]: if "." 
not in version_string: return () version = version_string.strip().split(".") return tuple(int(v) if v.isdigit() else 999 for v in version) @fixture def client(elasticsearch_url) -> Elasticsearch: try: connection = get_test_client( elasticsearch_url, wait="WAIT_FOR_ES" in os.environ ) add_connection("default", connection) yield connection wipe_cluster(connection) connection.close() except SkipTest: skip() @pytest_asyncio.fixture async def async_client(elasticsearch_url) -> AsyncGenerator[AsyncElasticsearch, None]: try: connection = await get_async_test_client( elasticsearch_url, wait="WAIT_FOR_ES" in os.environ ) add_async_connection("default", connection) yield connection wipe_cluster(connection) await connection.close() except SkipTest: skip() @fixture def es_version(client: Elasticsearch) -> Generator[Tuple[int, ...], None, None]: info = client.info() yield tuple( int(x) for x in re.match(r"^([0-9.]+)", info["version"]["number"]).group(1).split(".") # type: ignore ) @fixture def write_client(client: Elasticsearch) -> Generator[Elasticsearch, None, None]: yield client for index_name in client.indices.get(index="test-*", expand_wildcards="all"): client.indices.delete(index=index_name) client.options(ignore_status=404).indices.delete_template(name="test-template") client.options(ignore_status=404).indices.delete_index_template( name="test-template" ) @pytest_asyncio.fixture async def async_write_client( write_client: Elasticsearch, async_client: AsyncElasticsearch ) -> AsyncGenerator[AsyncElasticsearch, None]: yield async_client @fixture def mock_client( dummy_response: ObjectApiResponse[Any], ) -> Generator[Elasticsearch, None, None]: client = Mock() client.search.return_value = dummy_response client.update_by_query.return_value = dummy_response add_connection("mock", client) yield client connections._conns = {} connections._kwargs = {} @fixture def async_mock_client( dummy_response: ObjectApiResponse[Any], ) -> Generator[Elasticsearch, None, None]: client = Mock() client.search = AsyncMock(return_value=dummy_response) client.indices = AsyncMock() client.update_by_query = AsyncMock() client.delete_by_query = AsyncMock() add_async_connection("mock", client) yield client async_connections._conns = {} async_connections._kwargs = {} @fixture def data_client(client: Elasticsearch) -> Generator[Elasticsearch, None, None]: # create mappings create_git_index(client, "git") create_flat_git_index(client, "flat-git") # load data bulk(client, DATA, raise_on_error=True, refresh=True) bulk(client, FLAT_DATA, raise_on_error=True, refresh=True) yield client client.options(ignore_status=404).indices.delete(index="git") client.options(ignore_status=404).indices.delete(index="flat-git") @pytest_asyncio.fixture async def async_data_client( data_client: Elasticsearch, async_client: AsyncElasticsearch ) -> AsyncGenerator[AsyncElasticsearch, None]: yield async_client @fixture def dummy_response() -> ObjectApiResponse[Any]: return ObjectApiResponse( meta=None, body={ "_shards": {"failed": 0, "successful": 10, "total": 10}, "hits": { "hits": [ { "_index": "test-index", "_type": "company", "_id": "elasticsearch", "_score": 12.0, "_source": {"city": "Amsterdam", "name": "Elasticsearch"}, }, { "_index": "test-index", "_type": "employee", "_id": "42", "_score": 11.123, "_routing": "elasticsearch", "_source": { "name": {"first": "Shay", "last": "Bannon"}, "lang": "java", "twitter": "kimchy", }, }, { "_index": "test-index", "_type": "employee", "_id": "47", "_score": 1, "_routing": "elasticsearch", "_source": { "name": 
{"first": "Honza", "last": "Král"}, "lang": "python", "twitter": "honzakral", }, }, { "_index": "test-index", "_type": "employee", "_id": "53", "_score": 16.0, "_routing": "elasticsearch", }, ], "max_score": 12.0, "total": 123, }, "timed_out": False, "took": 123, }, ) @fixture def aggs_search() -> Search: s = Search(index="flat-git") s.aggs.bucket("popular_files", "terms", field="files", size=2).metric( "line_stats", "stats", field="stats.lines" ).metric("top_commits", "top_hits", size=2, _source=["stats.*", "committed_date"]) s.aggs.bucket( "per_month", "date_histogram", interval="month", field="info.committed_date" ) s.aggs.metric("sum_lines", "sum", field="stats.lines") return s @fixture def aggs_data() -> Dict[str, Any]: return { "took": 4, "timed_out": False, "_shards": {"total": 1, "successful": 1, "failed": 0}, "hits": {"total": 52, "hits": [], "max_score": 0.0}, "aggregations": { "sum_lines": {"value": 25052.0}, "per_month": { "buckets": [ { "doc_count": 38, "key": 1393632000000, "key_as_string": "2014-03-01T00:00:00.000Z", }, { "doc_count": 11, "key": 1396310400000, "key_as_string": "2014-04-01T00:00:00.000Z", }, { "doc_count": 3, "key": 1398902400000, "key_as_string": "2014-05-01T00:00:00.000Z", }, ] }, "popular_files": { "buckets": [ { "key": "elasticsearch_dsl", "line_stats": { "count": 40, "max": 228.0, "min": 2.0, "sum": 2151.0, "avg": 53.775, }, "doc_count": 40, "top_commits": { "hits": { "total": 40, "hits": [ { "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037", "_type": "doc", "_source": { "stats": { "files": 4, "deletions": 7, "lines": 30, "insertions": 23, }, "committed_date": "2014-05-02T13:47:19", }, "_score": 1.0, "_index": "flat-git", }, { "_id": "eb3e543323f189fd7b698e66295427204fff5755", "_type": "doc", "_source": { "stats": { "files": 1, "deletions": 0, "lines": 18, "insertions": 18, }, "committed_date": "2014-05-01T13:32:14", }, "_score": 1.0, "_index": "flat-git", }, ], "max_score": 1.0, } }, }, { "key": "test_elasticsearch_dsl", "line_stats": { "count": 35, "max": 228.0, "min": 2.0, "sum": 1939.0, "avg": 55.4, }, "doc_count": 35, "top_commits": { "hits": { "total": 35, "hits": [ { "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037", "_type": "doc", "_source": { "stats": { "files": 4, "deletions": 7, "lines": 30, "insertions": 23, }, "committed_date": "2014-05-02T13:47:19", }, "_score": 1.0, "_index": "flat-git", }, { "_id": "dd15b6ba17dd9ba16363a51f85b31f66f1fb1157", "_type": "doc", "_source": { "stats": { "files": 3, "deletions": 18, "lines": 62, "insertions": 44, }, "committed_date": "2014-05-01T13:30:44", }, "_score": 1.0, "_index": "flat-git", }, ], "max_score": 1.0, } }, }, ], "doc_count_error_upper_bound": 0, "sum_other_doc_count": 120, }, }, } def make_pr(pr_module: Any) -> Any: return pr_module.PullRequest( _id=42, comments=[ pr_module.Comment( content="Hello World!", author=pr_module.User(name="honzakral"), created_at=datetime(2018, 1, 9, 10, 17, 3, 21184), history=[ pr_module.History( timestamp=datetime(2012, 1, 1), diff="-Ahoj Svete!\n+Hello World!", ) ], ), ], created_at=datetime(2018, 1, 9, 9, 17, 3, 21184), ) @fixture def pull_request(write_client: Elasticsearch) -> sync_document.PullRequest: sync_document.PullRequest.init() pr = cast(sync_document.PullRequest, make_pr(sync_document)) pr.save(refresh=True) return pr @pytest_asyncio.fixture async def async_pull_request( async_write_client: AsyncElasticsearch, ) -> async_document.PullRequest: await async_document.PullRequest.init() pr = cast(async_document.PullRequest, make_pr(async_document)) await 
pr.save(refresh=True) return pr @fixture def setup_ubq_tests(client: Elasticsearch) -> str: index = "test-git" create_git_index(client, index) bulk(client, TEST_GIT_DATA, raise_on_error=True, refresh=True) return index python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/sleep.py000066400000000000000000000016541506101734100250330ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import time from typing import Union def sleep(secs: Union[int, float]) -> None: """Tests can use this function to sleep.""" time.sleep(secs) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_aggs.py000066400000000000000000000403601506101734100257000ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
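# NOTE (editorial addition): `aggs.A` is a factory that accepts an
# aggregation name plus parameters, a raw dict, or an existing agg
# instance; the tests below cover all three forms and check that each
# round-trips through to_dict(). A condensed sketch (field name is
# illustrative):
#
#     a = aggs.A("terms", field="tags")
#     assert aggs.A(a.to_dict()) == a   # dict form reconstructs the agg
#     assert aggs.A(a) is a             # agg instances pass through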
from pytest import raises from elasticsearch.dsl import aggs, query, types def test_repr() -> None: max_score = aggs.Max(field="score") a = aggs.A("terms", field="tags", aggs={"max_score": max_score}) assert "Terms(aggs={'max_score': Max(field='score')}, field='tags')" == repr(a) def test_meta() -> None: max_score = aggs.Max(field="score") a = aggs.A( "terms", field="tags", aggs={"max_score": max_score}, meta={"some": "metadata"} ) assert { "terms": {"field": "tags"}, "aggs": {"max_score": {"max": {"field": "score"}}}, "meta": {"some": "metadata"}, } == a.to_dict() def test_meta_from_dict() -> None: max_score = aggs.Max(field="score") a = aggs.A( "terms", field="tags", aggs={"max_score": max_score}, meta={"some": "metadata"} ) assert aggs.A(a.to_dict()) == a def test_A_creates_proper_agg() -> None: a = aggs.A("terms", field="tags") assert isinstance(a, aggs.Terms) assert a._params == {"field": "tags"} def test_A_handles_nested_aggs_properly() -> None: max_score = aggs.Max(field="score") a = aggs.A("terms", field="tags", aggs={"max_score": max_score}) assert isinstance(a, aggs.Terms) assert a._params == {"field": "tags", "aggs": {"max_score": max_score}} def test_A_passes_aggs_through() -> None: a = aggs.A("terms", field="tags") assert aggs.A(a) is a def test_A_from_dict() -> None: d = { "terms": {"field": "tags"}, "aggs": {"per_author": {"terms": {"field": "author.raw"}}}, } a = aggs.A(d) assert isinstance(a, aggs.Terms) assert a._params == { "field": "tags", "aggs": {"per_author": aggs.A("terms", field="author.raw")}, } assert a["per_author"] == aggs.A("terms", field="author.raw") assert a.aggs.per_author == aggs.A("terms", field="author.raw") # type: ignore[attr-defined] def test_A_fails_with_incorrect_dict() -> None: correct_d = { "terms": {"field": "tags"}, "aggs": {"per_author": {"terms": {"field": "author.raw"}}}, } with raises(Exception): aggs.A(correct_d, field="f") d = correct_d.copy() del d["terms"] with raises(Exception): aggs.A(d) d = correct_d.copy() d["xx"] = {} with raises(Exception): aggs.A(d) def test_A_fails_with_agg_and_params() -> None: a = aggs.A("terms", field="tags") with raises(Exception): aggs.A(a, field="score") def test_buckets_are_nestable() -> None: a = aggs.Terms(field="tags") b = a.bucket("per_author", "terms", field="author.raw") assert isinstance(b, aggs.Terms) assert b._params == {"field": "author.raw"} assert a.aggs == {"per_author": b} def test_metric_inside_buckets() -> None: a = aggs.Terms(field="tags") b = a.metric("max_score", "max", field="score") # returns bucket so it's chainable assert a is b assert a.aggs["max_score"] == aggs.Max(field="score") def test_buckets_equals_counts_subaggs() -> None: a = aggs.Terms(field="tags") a.bucket("per_author", "terms", field="author.raw") b = aggs.Terms(field="tags") assert a != b def test_buckets_to_dict() -> None: a = aggs.Terms(field="tags") a.bucket("per_author", "terms", field="author.raw") assert { "terms": {"field": "tags"}, "aggs": {"per_author": {"terms": {"field": "author.raw"}}}, } == a.to_dict() a = aggs.Terms(field="tags") a.metric("max_score", "max", field="score") assert { "terms": {"field": "tags"}, "aggs": {"max_score": {"max": {"field": "score"}}}, } == a.to_dict() def test_nested_buckets_are_reachable_as_getitem() -> None: a = aggs.Terms(field="tags") b = a.bucket("per_author", "terms", field="author.raw") assert a["per_author"] is not b assert a["per_author"] == b def test_nested_buckets_are_settable_as_getitem() -> None: a = aggs.Terms(field="tags") b = a["per_author"] = aggs.A("terms", 
field="author.raw") assert a.aggs["per_author"] is b def test_filter_can_be_instantiated_using_positional_args() -> None: a = aggs.Filter(query.Q("term", f=42)) assert {"filter": {"term": {"f": 42}}} == a.to_dict() assert a == aggs.A("filter", query.Q("term", f=42)) def test_filter_aggregation_as_nested_agg() -> None: a = aggs.Terms(field="tags") a.bucket("filtered", "filter", query.Q("term", f=42)) assert { "terms": {"field": "tags"}, "aggs": {"filtered": {"filter": {"term": {"f": 42}}}}, } == a.to_dict() def test_filter_aggregation_with_nested_aggs() -> None: a = aggs.Filter(query.Q("term", f=42)) a.bucket("testing", "terms", field="tags") assert { "filter": {"term": {"f": 42}}, "aggs": {"testing": {"terms": {"field": "tags"}}}, } == a.to_dict() def test_filters_correctly_identifies_the_hash() -> None: a = aggs.A( "filters", filters={ "group_a": {"term": {"group": "a"}}, "group_b": {"term": {"group": "b"}}, }, ) assert { "filters": { "filters": { "group_a": {"term": {"group": "a"}}, "group_b": {"term": {"group": "b"}}, } } } == a.to_dict() assert a.filters.group_a == query.Q("term", group="a") def test_bucket_sort_agg() -> None: # test the dictionary (type ignored) and fully typed alterantives bucket_sort_agg = aggs.BucketSort(sort=[{"total_sales": {"order": "desc"}}], size=3) # type: ignore assert bucket_sort_agg.to_dict() == { "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3} } bucket_sort_agg = aggs.BucketSort( sort=[types.SortOptions("total_sales", types.FieldSort(order="desc"))], size=3 ) assert bucket_sort_agg.to_dict() == { "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3} } a = aggs.DateHistogram(field="date", interval="month") a.bucket("total_sales", "sum", field="price") a.bucket( "sales_bucket_sort", "bucket_sort", sort=[{"total_sales": {"order": "desc"}}], size=3, ) assert { "date_histogram": {"field": "date", "interval": "month"}, "aggs": { "total_sales": {"sum": {"field": "price"}}, "sales_bucket_sort": { "bucket_sort": {"sort": [{"total_sales": {"order": "desc"}}], "size": 3} }, }, } == a.to_dict() def test_bucket_sort_agg_only_trnunc() -> None: # test the dictionary (type ignored) and fully typed alterantives bucket_sort_agg = aggs.BucketSort(**{"from": 1, "size": 1, "_expand__to_dot": False}) # type: ignore assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}} bucket_sort_agg = aggs.BucketSort(from_=1, size=1, _expand__to_dot=False) assert bucket_sort_agg.to_dict() == {"bucket_sort": {"from": 1, "size": 1}} a = aggs.DateHistogram(field="date", interval="month") a.bucket("bucket_truncate", "bucket_sort", **{"from": 1, "size": 1}) assert { "date_histogram": {"field": "date", "interval": "month"}, "aggs": {"bucket_truncate": {"bucket_sort": {"from": 1, "size": 1}}}, } == a.to_dict() def test_geohash_grid_aggregation() -> None: # test the dictionary (type ignored) and fully typed alterantives a = aggs.GeohashGrid(**{"field": "centroid", "precision": 3}) # type: ignore assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict() a = aggs.GeohashGrid(field="centroid", precision=3) assert {"geohash_grid": {"field": "centroid", "precision": 3}} == a.to_dict() def test_geohex_grid_aggregation() -> None: # test the dictionary (type ignored) and fully typed alterantives a = aggs.GeohexGrid(**{"field": "centroid", "precision": 3}) # type: ignore assert {"geohex_grid": {"field": "centroid", "precision": 3}} == a.to_dict() a = aggs.GeohexGrid(field="centroid", precision=3) assert {"geohex_grid": 
{"field": "centroid", "precision": 3}} == a.to_dict() def test_geotile_grid_aggregation() -> None: # test the dictionary (type ignored) and fully typed alterantives a = aggs.GeotileGrid(**{"field": "centroid", "precision": 3}) # type: ignore assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict() a = aggs.GeotileGrid(field="centroid", precision=3) assert {"geotile_grid": {"field": "centroid", "precision": 3}} == a.to_dict() def test_boxplot_aggregation() -> None: a = aggs.Boxplot(field="load_time") assert {"boxplot": {"field": "load_time"}} == a.to_dict() def test_rare_terms_aggregation() -> None: a = aggs.RareTerms(field="the-field") a.bucket("total_sales", "sum", field="price") a.bucket( "sales_bucket_sort", "bucket_sort", sort=[{"total_sales": {"order": "desc"}}], size=3, ) assert { "aggs": { "sales_bucket_sort": { "bucket_sort": {"size": 3, "sort": [{"total_sales": {"order": "desc"}}]} }, "total_sales": {"sum": {"field": "price"}}, }, "rare_terms": {"field": "the-field"}, } == a.to_dict() def test_variable_width_histogram_aggregation() -> None: a = aggs.VariableWidthHistogram(field="price", buckets=2) assert {"variable_width_histogram": {"buckets": 2, "field": "price"}} == a.to_dict() def test_ip_prefix_aggregation() -> None: # test the dictionary (type ignored) and fully typed alterantives a = aggs.IPPrefix(**{"field": "ipv4", "prefix_length": 24}) # type: ignore assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict() a = aggs.IPPrefix(field="ipv4", prefix_length=24) assert {"ip_prefix": {"field": "ipv4", "prefix_length": 24}} == a.to_dict() def test_ip_prefix_aggregation_extra() -> None: a = aggs.IPPrefix(field="ipv6", prefix_length=64, is_ipv6=True) assert { "ip_prefix": { "field": "ipv6", "prefix_length": 64, "is_ipv6": True, }, } == a.to_dict() def test_multi_terms_aggregation() -> None: a = aggs.MultiTerms(terms=[{"field": "tags"}, {"field": "author.row"}]) assert { "multi_terms": { "terms": [ {"field": "tags"}, {"field": "author.row"}, ] } } == a.to_dict() a = aggs.MultiTerms( terms=[ types.MultiTermLookup(field="tags"), types.MultiTermLookup(field="author.row"), ] ) assert { "multi_terms": { "terms": [ {"field": "tags"}, {"field": "author.row"}, ] } } == a.to_dict() def test_categorize_text_aggregation() -> None: a = aggs.CategorizeText( field="tags", categorization_filters=["\\w+\\_\\d{3}"], max_matched_tokens=2, similarity_threshold=30, ) assert { "categorize_text": { "field": "tags", "categorization_filters": ["\\w+\\_\\d{3}"], "max_matched_tokens": 2, "similarity_threshold": 30, } } == a.to_dict() def test_median_absolute_deviation_aggregation() -> None: a = aggs.MedianAbsoluteDeviation(field="rating") assert {"median_absolute_deviation": {"field": "rating"}} == a.to_dict() def test_t_test_aggregation() -> None: a = aggs.TTest( a={"field": "startup_time_before"}, b={"field": "startup_time_after"}, type="paired", ) assert { "t_test": { "a": {"field": "startup_time_before"}, "b": {"field": "startup_time_after"}, "type": "paired", } } == a.to_dict() def test_geo_line_aggregation() -> None: a = aggs.GeoLine(point={"field": "centroid"}, sort={"field": "date"}) assert { "geo_line": { "point": {"field": "centroid"}, "sort": {"field": "date"}, }, } == a.to_dict() def test_inference_aggregation() -> None: a = aggs.Inference(model_id="model-id", buckets_path={"agg_name": "agg_name"}) assert { "inference": {"buckets_path": {"agg_name": "agg_name"}, "model_id": "model-id"} } == a.to_dict() def test_matrix_stats_aggregation() -> None: a = 
aggs.MatrixStats(fields=["poverty", "income"]) assert {"matrix_stats": {"fields": ["poverty", "income"]}} == a.to_dict() def test_moving_percentiles_aggregation() -> None: a = aggs.DateHistogram() a.bucket("the_percentile", "percentiles", field="price", percents=[1.0, 99.0]) a.pipeline( "the_movperc", "moving_percentiles", buckets_path="the_percentile", window=10 ) assert { "aggs": { "the_movperc": { "moving_percentiles": {"buckets_path": "the_percentile", "window": 10} }, "the_percentile": { "percentiles": {"field": "price", "percents": [1.0, 99.0]} }, }, "date_histogram": {}, } == a.to_dict() def test_normalize_aggregation() -> None: a = aggs.Normalize(buckets_path="normalized", method="percent_of_sum") assert { "normalize": {"buckets_path": "normalized", "method": "percent_of_sum"} } == a.to_dict() def test_random_sampler_aggregation() -> None: a = aggs.RandomSampler(probability=0.1).metric( "price_percentiles", "percentiles", field="price", ) assert { "random_sampler": { "probability": 0.1, }, "aggs": { "price_percentiles": { "percentiles": {"field": "price"}, }, }, } == a.to_dict() def test_adjancecy_matrix_aggregation() -> None: a = aggs.AdjacencyMatrix(filters={"grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}}) # type: ignore assert { "adjacency_matrix": { "filters": { "grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}, } } } == a.to_dict() a = aggs.AdjacencyMatrix( filters={ "grpA": query.Terms(accounts=["hillary", "sidney"]), "grpB": query.Terms(accounts=["donald", "mitt"]), "grpC": query.Terms(accounts=["vladimir", "nigel"]), } ) assert { "adjacency_matrix": { "filters": { "grpA": {"terms": {"accounts": ["hillary", "sidney"]}}, "grpB": {"terms": {"accounts": ["donald", "mitt"]}}, "grpC": {"terms": {"accounts": ["vladimir", "nigel"]}}, } } } == a.to_dict() def test_top_metrics_aggregation() -> None: # test the dictionary (type ignored) and fully typed alterantives a = aggs.TopMetrics(metrics={"field": "m"}, sort={"s": "desc"}) # type: ignore assert { "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": "desc"}} } == a.to_dict() a = aggs.TopMetrics( metrics=types.TopMetricsValue(field="m"), sort=types.SortOptions("s", types.FieldSort(order="desc")), ) assert { "top_metrics": {"metrics": {"field": "m"}, "sort": {"s": {"order": "desc"}}} } == a.to_dict() def test_bucket_agg_with_filter() -> None: b = aggs.Filter(query.Terms(something=[1, 2, 3])) a = aggs.Terms(field="some_field", size=100) a.bucket("b", b) assert a.aggs["b"] == a["b"] # a['b'] threw exception before patch #1902 python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_analysis.py000066400000000000000000000146331506101734100266060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from pytest import raises from elasticsearch.dsl import analysis def test_analyzer_serializes_as_name() -> None: a = analysis.analyzer("my_analyzer") assert "my_analyzer" == a.to_dict() # type: ignore def test_analyzer_has_definition() -> None: a = analysis.CustomAnalyzer( "my_analyzer", tokenizer="keyword", filter=["lowercase"] ) assert { "type": "custom", "tokenizer": "keyword", "filter": ["lowercase"], } == a.get_definition() def test_simple_multiplexer_filter() -> None: a = analysis.analyzer( "my_analyzer", tokenizer="keyword", filter=[ analysis.token_filter( "my_multi", "multiplexer", filters=["lowercase", "lowercase, stop"] ) ], ) assert { "analyzer": { "my_analyzer": { "filter": ["my_multi"], "tokenizer": "keyword", "type": "custom", } }, "filter": { "my_multi": { "filters": ["lowercase", "lowercase, stop"], "type": "multiplexer", } }, } == a.get_analysis_definition() def test_multiplexer_with_custom_filter() -> None: a = analysis.analyzer( "my_analyzer", tokenizer="keyword", filter=[ analysis.token_filter( "my_multi", "multiplexer", filters=[ [analysis.token_filter("en", "snowball", language="English")], "lowercase, stop", ], ) ], ) assert { "analyzer": { "my_analyzer": { "filter": ["my_multi"], "tokenizer": "keyword", "type": "custom", } }, "filter": { "en": {"type": "snowball", "language": "English"}, "my_multi": {"filters": ["en", "lowercase, stop"], "type": "multiplexer"}, }, } == a.get_analysis_definition() def test_conditional_token_filter() -> None: a = analysis.analyzer( "my_cond", tokenizer=analysis.tokenizer("keyword"), filter=[ analysis.token_filter( "testing", "condition", script={"source": "return true"}, filter=[ "lowercase", analysis.token_filter("en", "snowball", language="English"), ], ), "stop", ], ) assert { "analyzer": { "my_cond": { "filter": ["testing", "stop"], "tokenizer": "keyword", "type": "custom", } }, "filter": { "en": {"language": "English", "type": "snowball"}, "testing": { "script": {"source": "return true"}, "filter": ["lowercase", "en"], "type": "condition", }, }, } == a.get_analysis_definition() def test_conflicting_nested_filters_cause_error() -> None: a = analysis.analyzer( "my_cond", tokenizer=analysis.tokenizer("keyword"), filter=[ analysis.token_filter("en", "stemmer", language="english"), analysis.token_filter( "testing", "condition", script={"source": "return true"}, filter=[ "lowercase", analysis.token_filter("en", "snowball", language="English"), ], ), ], ) with raises(ValueError): a.get_analysis_definition() def test_normalizer_serializes_as_name() -> None: n = analysis.normalizer("my_normalizer") assert "my_normalizer" == n.to_dict() # type: ignore def test_normalizer_has_definition() -> None: n = analysis.CustomNormalizer( "my_normalizer", filter=["lowercase", "asciifolding"], char_filter=["quote"] ) assert { "type": "custom", "filter": ["lowercase", "asciifolding"], "char_filter": ["quote"], } == n.get_definition() def test_tokenizer() -> None: t = analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3) assert t.to_dict() == "trigram" # type: ignore assert {"type": "nGram", "min_gram": 3, "max_gram": 3} == t.get_definition() 
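# Illustrative sketch (added for exposition, not part of the original suite):
# like the tokenizers and token filters above, a named char filter serializes
# to its name while get_definition() carries the body; the "normalize_quotes"
# name and its mappings are made up for this example.
def _example_char_filter_sketch() -> None:
    quotes = analysis.char_filter(
        "normalize_quotes", "mapping", mappings=['«=>"', '»=>"']
    )
    assert quotes.to_dict() == "normalize_quotes"  # type: ignore
    assert {
        "type": "mapping",
        "mappings": ['«=>"', '»=>"'],
    } == quotes.get_definition()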
def test_custom_analyzer_can_collect_custom_items() -> None: trigram = analysis.tokenizer("trigram", "nGram", min_gram=3, max_gram=3) my_stop = analysis.token_filter("my_stop", "stop", stopwords=["a", "b"]) umlauts = analysis.char_filter("umlauts", "pattern_replace", mappings=["ü=>ue"]) a = analysis.analyzer( "my_analyzer", tokenizer=trigram, filter=["lowercase", my_stop], char_filter=["html_strip", umlauts], ) assert a.to_dict() == "my_analyzer" # type: ignore assert { "analyzer": { "my_analyzer": { "type": "custom", "tokenizer": "trigram", "filter": ["lowercase", "my_stop"], "char_filter": ["html_strip", "umlauts"], } }, "tokenizer": {"trigram": trigram.get_definition()}, "filter": {"my_stop": my_stop.get_definition()}, "char_filter": {"umlauts": umlauts.get_definition()}, } == a.get_analysis_definition() def test_stemmer_analyzer_can_pass_name() -> None: t = analysis.token_filter( "my_english_filter", name="minimal_english", type="stemmer" ) assert t.to_dict() == "my_english_filter" # type: ignore assert {"type": "stemmer", "name": "minimal_english"} == t.get_definition() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_connections.py000066400000000000000000000111301506101734100272720ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
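# Illustrative sketch (added for exposition, not part of the original suite):
# a Connections registry constructs clients lazily from configure() kwargs,
# which is what the tests below verify with a dummy client class;
# ``_StubClient`` here is a stand-in of my own.
def _example_lazy_connections_sketch() -> None:
    from elasticsearch.dsl import connections as _connections

    class _StubClient:
        def __init__(self, *args: object, hosts: object = None, **kwargs: object):
            self.hosts = hosts

    registry = _connections.Connections[_StubClient](elasticsearch_class=_StubClient)
    registry.configure(default={"hosts": ["https://es.com:9200"]})
    # no client exists yet; the first get_connection() call constructs one
    assert registry.get_connection().hosts == ["https://es.com:9200"]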
from typing import Any, List from pytest import raises from elasticsearch import Elasticsearch from elasticsearch.dsl import connections, serializer class DummyElasticsearch: def __init__(self, *args: Any, hosts: List[str], **kwargs: Any): self.hosts = hosts def test_default_connection_is_returned_by_default() -> None: c = connections.Connections[object](elasticsearch_class=object) con, con2 = object(), object() c.add_connection("default", con) c.add_connection("not-default", con2) assert c.get_connection() is con def test_get_connection_created_connection_if_needed() -> None: c = connections.Connections[DummyElasticsearch]( elasticsearch_class=DummyElasticsearch ) c.configure( default={"hosts": ["https://es.com:9200"]}, local={"hosts": ["https://localhost:9200"]}, ) default = c.get_connection() local = c.get_connection("local") assert isinstance(default, DummyElasticsearch) assert isinstance(local, DummyElasticsearch) assert default.hosts == ["https://es.com:9200"] assert local.hosts == ["https://localhost:9200"] def test_configure_preserves_unchanged_connections() -> None: c = connections.Connections[DummyElasticsearch]( elasticsearch_class=DummyElasticsearch ) c.configure( default={"hosts": ["https://es.com:9200"]}, local={"hosts": ["https://localhost:9200"]}, ) default = c.get_connection() local = c.get_connection("local") c.configure( default={"hosts": ["https://not-es.com:9200"]}, local={"hosts": ["https://localhost:9200"]}, ) new_default = c.get_connection() new_local = c.get_connection("local") assert new_local is local assert new_default is not default def test_remove_connection_removes_both_conn_and_conf() -> None: c = connections.Connections[object](elasticsearch_class=DummyElasticsearch) c.configure( default={"hosts": ["https://es.com:9200"]}, local={"hosts": ["https://localhost:9200"]}, ) c.add_connection("local2", object()) c.remove_connection("default") c.get_connection("local2") c.remove_connection("local2") with raises(Exception): c.get_connection("local2") c.get_connection("default") def test_create_connection_constructs_client() -> None: c = connections.Connections[DummyElasticsearch]( elasticsearch_class=DummyElasticsearch ) c.create_connection("testing", hosts=["https://es.com:9200"]) con = c.get_connection("testing") assert con.hosts == ["https://es.com:9200"] def test_create_connection_adds_our_serializer() -> None: c = connections.Connections[Elasticsearch](elasticsearch_class=Elasticsearch) c.create_connection("testing", hosts=["https://es.com:9200"]) c_serializers = c.get_connection("testing").transport.serializers assert c_serializers.serializers["application/json"] is serializer.serializer def test_connection_has_correct_user_agent() -> None: c = connections.Connections[Elasticsearch](elasticsearch_class=Elasticsearch) c.create_connection("testing", hosts=["https://es.com:9200"]) assert ( c.get_connection("testing") ._headers["user-agent"] .startswith("elasticsearch-dsl-py/") ) my_client = Elasticsearch(hosts=["http://localhost:9200"]) my_client = my_client.options(headers={"user-agent": "my-user-agent/1.0"}) c.add_connection("default", my_client) assert c.get_connection()._headers["user-agent"].startswith("elasticsearch-dsl-py/") my_client = Elasticsearch(hosts=["http://localhost:9200"]) assert ( c.get_connection(my_client) ._headers["user-agent"] .startswith("elasticsearch-dsl-py/") ) not_a_client = object() assert c.get_connection(not_a_client) == not_a_client # type: ignore[arg-type] 
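# Illustrative sketch (added for exposition, not part of the original suite):
# as the last test above shows, get_connection() passes through objects that
# are not registered aliases, so ``using=`` parameters can accept either an
# alias string or an already-constructed client object.
def _example_connection_passthrough_sketch() -> None:
    registry = connections.Connections[object](elasticsearch_class=object)
    live_client = object()  # stands in for a real Elasticsearch instance
    assert registry.get_connection(live_client) == live_client  # type: ignore[arg-type]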
python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_field.py000066400000000000000000000154601506101734100260450ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import base64 import ipaddress from datetime import date, datetime, time from typing import cast import pytest from dateutil import tz from elasticsearch.dsl import InnerDoc, Range, ValidationException, field def test_date_range_deserialization() -> None: data = {"lt": "2018-01-01T00:30:10"} r = field.DateRange().deserialize(data) assert isinstance(r, Range) assert r.lt == datetime(2018, 1, 1, 0, 30, 10) def test_boolean_deserialization() -> None: bf = field.Boolean() assert not bf.deserialize("false") assert not bf.deserialize(False) assert not bf.deserialize("") assert not bf.deserialize(0) assert bf.deserialize(True) assert bf.deserialize("true") assert bf.deserialize(1) def test_datetime_deserialization() -> None: f = field.Date() dt = datetime.now() assert dt == f._deserialize(dt.isoformat()) d = date.today() assert datetime.combine(d, time()) == f._deserialize(d.isoformat()) def test_date_deserialization() -> None: f = field.Date(format="yyyy-MM-dd") d = date.today() assert d == f._deserialize(d.isoformat()) dt = datetime.now() assert dt.date() == f._deserialize(dt.isoformat()) def test_date_field_can_have_default_tz() -> None: f = field.Date(default_timezone="UTC") now = datetime.now() now_with_tz = cast(datetime, f._deserialize(now)) assert now_with_tz.tzinfo == tz.gettz("UTC") assert now.isoformat() + "+00:00" == now_with_tz.isoformat() now_with_tz = cast(datetime, f._deserialize(now.isoformat())) assert now_with_tz.tzinfo == tz.gettz("UTC") assert now.isoformat() + "+00:00" == now_with_tz.isoformat() def test_custom_field_car_wrap_other_field() -> None: class MyField(field.CustomField): @property def builtin_type(self) -> field.Text: return field.Text(**self._params) assert {"type": "text", "index": "not_analyzed"} == MyField( index="not_analyzed" ).to_dict() def test_field_from_dict() -> None: f = field.construct_field({"type": "text", "index": "not_analyzed"}) assert isinstance(f, field.Text) assert {"type": "text", "index": "not_analyzed"} == f.to_dict() def test_multi_fields_are_accepted_and_parsed() -> None: f = field.construct_field( "text", fields={"raw": {"type": "keyword"}, "eng": field.Text(analyzer="english")}, ) assert isinstance(f, field.Text) assert { "type": "text", "fields": { "raw": {"type": "keyword"}, "eng": {"type": "text", "analyzer": "english"}, }, } == f.to_dict() def test_nested_provides_direct_access_to_its_fields() -> None: f = field.Nested(properties={"name": {"type": "text", "index": "not_analyzed"}}) assert "name" in f assert f["name"] == field.Text(index="not_analyzed") def test_field_supports_multiple_analyzers() -> None: f = 
field.Text(analyzer="snowball", search_analyzer="keyword") assert { "analyzer": "snowball", "search_analyzer": "keyword", "type": "text", } == f.to_dict() def test_multifield_supports_multiple_analyzers() -> None: f = field.Text( fields={ "f1": field.Text(search_analyzer="keyword", analyzer="snowball"), "f2": field.Text(analyzer="keyword"), } ) assert { "fields": { "f1": { "analyzer": "snowball", "search_analyzer": "keyword", "type": "text", }, "f2": {"analyzer": "keyword", "type": "text"}, }, "type": "text", } == f.to_dict() def test_scaled_float() -> None: with pytest.raises(TypeError): field.ScaledFloat() # type: ignore f = field.ScaledFloat(123) assert f.to_dict() == {"scaling_factor": 123, "type": "scaled_float"} def test_ipaddress() -> None: f = field.Ip() assert f.deserialize("127.0.0.1") == ipaddress.ip_address("127.0.0.1") assert f.deserialize("::1") == ipaddress.ip_address("::1") assert f.serialize(f.deserialize("::1")) == "::1" assert f.deserialize(None) is None with pytest.raises(ValueError): assert f.deserialize("not_an_ipaddress") def test_float() -> None: f = field.Float() assert f.deserialize("42") == 42.0 assert f.deserialize(None) is None with pytest.raises(ValueError): assert f.deserialize("not_a_float") def test_integer() -> None: f = field.Integer() assert f.deserialize("42") == 42 assert f.deserialize(None) is None with pytest.raises(ValueError): assert f.deserialize("not_an_integer") def test_binary() -> None: f = field.Binary() assert f.deserialize(base64.b64encode(b"42")) == b"42" assert f.deserialize(f.serialize(b"42")) == b"42" assert f.deserialize(None) is None def test_constant_keyword() -> None: f = field.ConstantKeyword() assert f.to_dict() == {"type": "constant_keyword"} def test_rank_features() -> None: f = field.RankFeatures() assert f.to_dict() == {"type": "rank_features"} def test_object_dynamic_values() -> None: f = field.Object(dynamic=True) assert f.to_dict()["dynamic"] is True f = field.Object(dynamic=False) assert f.to_dict()["dynamic"] is False f = field.Object(dynamic="strict") assert f.to_dict()["dynamic"] == "strict" def test_object_disabled() -> None: f = field.Object(enabled=False) assert f.to_dict() == {"type": "object", "enabled": False} def test_object_constructor() -> None: expected = {"type": "object", "properties": {"inner_int": {"type": "integer"}}} class Inner(InnerDoc): inner_int = field.Integer() obj_from_doc = field.Object(doc_class=Inner) assert obj_from_doc.to_dict() == expected obj_from_props = field.Object(properties={"inner_int": field.Integer()}) assert obj_from_props.to_dict() == expected with pytest.raises(ValidationException): field.Object(doc_class=Inner, properties={"inner_int": field.Integer()}) with pytest.raises(ValidationException): field.Object(doc_class=Inner, dynamic=False) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/000077500000000000000000000000001506101734100267255ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/__init__.py000066400000000000000000000014231506101734100310360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/000077500000000000000000000000001506101734100302015ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/__init__.py000066400000000000000000000014231506101734100323120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/test_analysis.py000066400000000000000000000036751506101734100334500ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import analyzer, token_filter, tokenizer @pytest.mark.asyncio async def test_simulate_with_just__builtin_tokenizer( async_client: AsyncElasticsearch, ) -> None: a = analyzer("my-analyzer", tokenizer="keyword") tokens = (await a.async_simulate("Hello World!", using=async_client)).tokens assert len(tokens) == 1 assert tokens[0].token == "Hello World!" 
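# Illustrative sketch (added for exposition, not part of the original suite):
# every test in this module follows the same shape -- build an analyzer,
# await async_simulate(), and inspect the .tokens of the response; the
# analyzer name below is arbitrary.
async def _example_simulate_sketch(async_client: AsyncElasticsearch) -> None:
    a = analyzer("sketch-analyzer", tokenizer="keyword")
    tokens = (await a.async_simulate("one token", using=async_client)).tokens
    assert [t.token for t in tokens] == ["one token"]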
@pytest.mark.asyncio async def test_simulate_complex(async_client: AsyncElasticsearch) -> None: a = analyzer( "my-analyzer", tokenizer=tokenizer("split_words", "simple_pattern_split", pattern=":"), filter=["lowercase", token_filter("no-ifs", "stop", stopwords=["if"])], ) tokens = (await a.async_simulate("if:this:works", using=async_client)).tokens assert len(tokens) == 2 assert ["this", "works"] == [t.token for t in tokens] @pytest.mark.asyncio async def test_simulate_builtin(async_client: AsyncElasticsearch) -> None: a = analyzer("my-analyzer", "english") tokens = (await a.async_simulate("fixes running")).tokens assert ["fix", "run"] == [t.token for t in tokens] python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/test_document.py000066400000000000000000000644531506101734100334400ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # this file creates several documents using bad or no types because # these are still supported and should be kept functional in spite # of not having appropriate type hints. For that reason the comment # below disables many mypy checks that fail as a result of this.
# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" from datetime import datetime from ipaddress import ip_address from typing import TYPE_CHECKING, Any, AsyncIterator, Dict, List, Optional, Tuple, Union import pytest from pytest import raises from pytz import timezone from elasticsearch import AsyncElasticsearch, ConflictError, NotFoundError from elasticsearch.dsl import ( AsyncDocument, AsyncSearch, Binary, Boolean, Date, DenseVector, Double, InnerDoc, Ip, Keyword, Long, M, Mapping, MetaField, Nested, Object, Q, RankFeatures, Text, analyzer, mapped_field, ) from elasticsearch.dsl.query import Match from elasticsearch.dsl.types import MatchQuery from elasticsearch.dsl.utils import AttrList from elasticsearch.helpers.errors import BulkIndexError snowball = analyzer("my_snow", tokenizer="standard", filter=["lowercase", "snowball"]) class User(InnerDoc): name = Text(fields={"raw": Keyword()}) class Wiki(AsyncDocument): owner = Object(User) views = Long() ranked = RankFeatures() class Index: name = "test-wiki" class Repository(AsyncDocument): owner = Object(User) created_at = Date() description = Text(analyzer=snowball) tags = Keyword() @classmethod def search(cls) -> AsyncSearch["Repository"]: # type: ignore[override] return super().search().filter("term", commit_repo="repo") class Index: name = "git" class Commit(AsyncDocument): committed_date = Date() authored_date = Date() description = Text(analyzer=snowball) class Index: name = "flat-git" class Meta: mapping = Mapping() class History(InnerDoc): timestamp = Date() diff = Text() class Comment(InnerDoc): content = Text() created_at = Date() author = Object(User) history = Nested(History) class Meta: dynamic = MetaField(False) class PullRequest(AsyncDocument): comments = Nested(Comment) created_at = Date() class Index: name = "test-prs" class SerializationDoc(AsyncDocument): i = Long() b = Boolean() d = Double() bin = Binary() ip = Ip() class Index: name = "test-serialization" class Tags(AsyncDocument): tags = Keyword(multi=True) class Index: name = "tags" @pytest.mark.asyncio async def test_serialization(async_write_client: AsyncElasticsearch) -> None: await SerializationDoc.init() await async_write_client.index( index="test-serialization", id=42, body={ "i": [1, 2, "3", None], "b": [True, False, "true", "false", None], "d": [0.1, "-0.1", None], "bin": ["SGVsbG8gV29ybGQ=", None], "ip": ["::1", "127.0.0.1", None], }, ) sd = await SerializationDoc.get(id=42) assert sd is not None assert sd.i == [1, 2, 3, None] assert sd.b == [True, False, True, False, None] assert sd.d == [0.1, -0.1, None] assert sd.bin == [b"Hello World", None] assert sd.ip == [ip_address("::1"), ip_address("127.0.0.1"), None] assert sd.to_dict() == { "b": [True, False, True, False, None], "bin": ["SGVsbG8gV29ybGQ=", None], "d": [0.1, -0.1, None], "i": [1, 2, 3, None], "ip": ["::1", "127.0.0.1", None], } @pytest.mark.asyncio async def test_nested_inner_hits_are_wrapped_properly(async_pull_request: Any) -> None: history_query = Q( "nested", path="comments.history", inner_hits={}, query=Q("match", comments__history__diff="ahoj"), ) s = PullRequest.search().query( "nested", inner_hits={}, path="comments", query=history_query ) response = await s.execute() pr = response.hits[0] assert isinstance(pr, PullRequest) assert isinstance(pr.comments[0], Comment) assert isinstance(pr.comments[0].history[0], History) comment = pr.meta.inner_hits.comments.hits[0] assert isinstance(comment, Comment) assert comment.author.name == 
"honzakral" assert isinstance(comment.history[0], History) history = comment.meta.inner_hits["comments.history"].hits[0] assert isinstance(history, History) assert history.timestamp == datetime(2012, 1, 1) assert "score" in history.meta @pytest.mark.asyncio async def test_nested_inner_hits_are_deserialized_properly( async_pull_request: Any, ) -> None: s = PullRequest.search().query( "nested", inner_hits={}, path="comments", query=Q("match", comments__content="hello"), ) response = await s.execute() pr = response.hits[0] assert isinstance(pr.created_at, datetime) assert isinstance(pr.comments[0], Comment) assert isinstance(pr.comments[0].created_at, datetime) @pytest.mark.asyncio async def test_nested_top_hits_are_wrapped_properly(async_pull_request: Any) -> None: s = PullRequest.search() s.aggs.bucket("comments", "nested", path="comments").metric( "hits", "top_hits", size=1 ) r = await s.execute() print(r._d_) assert isinstance(r.aggregations.comments.hits.hits[0], Comment) @pytest.mark.asyncio async def test_update_object_field(async_write_client: AsyncElasticsearch) -> None: await Wiki.init() w = Wiki( owner=User(name="Honza Kral"), _id="elasticsearch-py", ranked={"test1": 0.1, "topic2": 0.2}, ) await w.save() assert "updated" == await w.update(owner=[{"name": "Honza"}, User(name="Nick")]) assert w.owner[0].name == "Honza" assert w.owner[1].name == "Nick" w = await Wiki.get(id="elasticsearch-py") assert w.owner[0].name == "Honza" assert w.owner[1].name == "Nick" assert w.ranked == {"test1": 0.1, "topic2": 0.2} @pytest.mark.asyncio async def test_update_script(async_write_client: AsyncElasticsearch) -> None: await Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) await w.save() await w.update(script="ctx._source.views += params.inc", inc=5) w = await Wiki.get(id="elasticsearch-py") assert w.views == 47 @pytest.mark.asyncio async def test_update_script_with_dict(async_write_client: AsyncElasticsearch) -> None: await Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) await w.save() await w.update( script={ "source": "ctx._source.views += params.inc1 + params.inc2", "params": {"inc1": 2}, "lang": "painless", }, inc2=3, ) w = await Wiki.get(id="elasticsearch-py") assert w.views == 47 @pytest.mark.asyncio async def test_update_retry_on_conflict(async_write_client: AsyncElasticsearch) -> None: await Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) await w.save() w1 = await Wiki.get(id="elasticsearch-py") w2 = await Wiki.get(id="elasticsearch-py") assert w1 is not None assert w2 is not None await w1.update( script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1 ) await w2.update( script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1 ) w = await Wiki.get(id="elasticsearch-py") assert w.views == 52 @pytest.mark.asyncio @pytest.mark.parametrize("retry_on_conflict", [None, 0]) async def test_update_conflicting_version( async_write_client: AsyncElasticsearch, retry_on_conflict: bool ) -> None: await Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) await w.save() w1 = await Wiki.get(id="elasticsearch-py") w2 = await Wiki.get(id="elasticsearch-py") assert w1 is not None assert w2 is not None await w1.update(script="ctx._source.views += params.inc", inc=5) with raises(ConflictError): await w2.update( script="ctx._source.views += params.inc", inc=5, retry_on_conflict=retry_on_conflict, ) @pytest.mark.asyncio async def 
test_save_and_update_return_doc_meta( async_write_client: AsyncElasticsearch, ) -> None: await Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) resp = await w.save(return_doc_meta=True) assert resp["_index"] == "test-wiki" assert resp["result"] == "created" assert set(resp.keys()) == { "_id", "_index", "_primary_term", "_seq_no", "_shards", "_version", "result", } resp = await w.update( script="ctx._source.views += params.inc", inc=5, return_doc_meta=True ) assert resp["_index"] == "test-wiki" assert resp["result"] == "updated" assert set(resp.keys()) == { "_id", "_index", "_primary_term", "_seq_no", "_shards", "_version", "result", } @pytest.mark.asyncio async def test_init(async_write_client: AsyncElasticsearch) -> None: await Repository.init(index="test-git") assert await async_write_client.indices.exists(index="test-git") @pytest.mark.asyncio async def test_get_raises_404_on_index_missing( async_data_client: AsyncElasticsearch, ) -> None: with raises(NotFoundError): await Repository.get("elasticsearch-dsl-php", index="not-there") @pytest.mark.asyncio async def test_get_raises_404_on_non_existent_id( async_data_client: AsyncElasticsearch, ) -> None: with raises(NotFoundError): await Repository.get("elasticsearch-dsl-php") @pytest.mark.asyncio async def test_get_returns_none_if_404_ignored( async_data_client: AsyncElasticsearch, ) -> None: assert None is await Repository.get( "elasticsearch-dsl-php", using=async_data_client.options(ignore_status=404) ) @pytest.mark.asyncio async def test_get_returns_none_if_404_ignored_and_index_doesnt_exist( async_data_client: AsyncElasticsearch, ) -> None: assert None is await Repository.get( "42", index="not-there", using=async_data_client.options(ignore_status=404) ) @pytest.mark.asyncio async def test_get(async_data_client: AsyncElasticsearch) -> None: elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") assert isinstance(elasticsearch_repo, Repository) assert elasticsearch_repo.owner.name == "elasticsearch" assert datetime(2014, 3, 3) == elasticsearch_repo.created_at @pytest.mark.asyncio async def test_exists_return_true(async_data_client: AsyncElasticsearch) -> None: assert await Repository.exists("elasticsearch-dsl-py") @pytest.mark.asyncio async def test_exists_false(async_data_client: AsyncElasticsearch) -> None: assert not await Repository.exists("elasticsearch-dsl-php") @pytest.mark.asyncio async def test_get_with_tz_date(async_data_client: AsyncElasticsearch) -> None: first_commit = await Commit.get( id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" ) assert first_commit is not None tzinfo = timezone("Europe/Prague") assert ( tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123000)) == first_commit.authored_date ) @pytest.mark.asyncio async def test_save_with_tz_date(async_data_client: AsyncElasticsearch) -> None: tzinfo = timezone("Europe/Prague") first_commit = await Commit.get( id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" ) assert first_commit is not None first_commit.committed_date = tzinfo.localize( datetime(2014, 5, 2, 13, 47, 19, 123456) ) await first_commit.save() first_commit = await Commit.get( id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" ) assert first_commit is not None assert ( tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123456)) == first_commit.committed_date ) COMMIT_DOCS_WITH_MISSING = [ {"_id": "0"}, # Missing {"_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"}, # Existing 
{"_id": "f"}, # Missing {"_id": "eb3e543323f189fd7b698e66295427204fff5755"}, # Existing ] @pytest.mark.asyncio async def test_mget(async_data_client: AsyncElasticsearch) -> None: commits = await Commit.mget(COMMIT_DOCS_WITH_MISSING) assert commits[0] is None assert commits[1] is not None assert commits[1].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" assert commits[2] is None assert commits[3] is not None assert commits[3].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" @pytest.mark.asyncio async def test_mget_raises_exception_when_missing_param_is_invalid( async_data_client: AsyncElasticsearch, ) -> None: with raises(ValueError): await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raj") @pytest.mark.asyncio async def test_mget_raises_404_when_missing_param_is_raise( async_data_client: AsyncElasticsearch, ) -> None: with raises(NotFoundError): await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raise") @pytest.mark.asyncio async def test_mget_ignores_missing_docs_when_missing_param_is_skip( async_data_client: AsyncElasticsearch, ) -> None: commits = await Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="skip") assert commits[0] is not None assert commits[0].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" assert commits[1] is not None assert commits[1].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" @pytest.mark.asyncio async def test_update_works_from_search_response( async_data_client: AsyncElasticsearch, ) -> None: elasticsearch_repo = (await Repository.search().execute())[0] await elasticsearch_repo.update(owner={"other_name": "elastic"}) assert "elastic" == elasticsearch_repo.owner.other_name new_version = await Repository.get("elasticsearch-dsl-py") assert new_version is not None assert "elastic" == new_version.owner.other_name assert "elasticsearch" == new_version.owner.name @pytest.mark.asyncio async def test_update(async_data_client: AsyncElasticsearch) -> None: elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None v = elasticsearch_repo.meta.version old_seq_no = elasticsearch_repo.meta.seq_no await elasticsearch_repo.update( owner={"new_name": "elastic"}, new_field="testing-update" ) assert "elastic" == elasticsearch_repo.owner.new_name assert "testing-update" == elasticsearch_repo.new_field # assert version has been updated assert elasticsearch_repo.meta.version == v + 1 new_version = await Repository.get("elasticsearch-dsl-py") assert new_version is not None assert "testing-update" == new_version.new_field assert "elastic" == new_version.owner.new_name assert "elasticsearch" == new_version.owner.name assert "seq_no" in new_version.meta assert new_version.meta.seq_no != old_seq_no assert "primary_term" in new_version.meta @pytest.mark.asyncio async def test_save_updates_existing_doc(async_data_client: AsyncElasticsearch) -> None: elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None elasticsearch_repo.new_field = "testing-save" old_seq_no = elasticsearch_repo.meta.seq_no assert "updated" == await elasticsearch_repo.save() new_repo = await async_data_client.get(index="git", id="elasticsearch-dsl-py") assert "testing-save" == new_repo["_source"]["new_field"] assert new_repo["_seq_no"] != old_seq_no assert new_repo["_seq_no"] == elasticsearch_repo.meta.seq_no @pytest.mark.asyncio async def test_update_empty_field(async_client: AsyncElasticsearch) -> None: await Tags._index.delete(ignore_unavailable=True) await Tags.init() d = Tags(id="123", tags=["a", "b"]) 
await d.save(refresh=True) await d.update(tags=[], refresh=True) assert d.tags == [] r = await Tags.search().execute() assert r.hits[0].tags == [] @pytest.mark.asyncio async def test_save_automatically_uses_seq_no_and_primary_term( async_data_client: AsyncElasticsearch, ) -> None: elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None elasticsearch_repo.meta.seq_no += 1 with raises(ConflictError): await elasticsearch_repo.save() @pytest.mark.asyncio async def test_delete_automatically_uses_seq_no_and_primary_term( async_data_client: AsyncElasticsearch, ) -> None: elasticsearch_repo = await Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None elasticsearch_repo.meta.seq_no += 1 with raises(ConflictError): await elasticsearch_repo.delete() def assert_doc_equals(expected: Any, actual: Any) -> None: for f in expected: assert f in actual assert actual[f] == expected[f] @pytest.mark.asyncio async def test_can_save_to_different_index( async_write_client: AsyncElasticsearch, ) -> None: test_repo = Repository(description="testing", meta={"id": 42}) assert await test_repo.save(index="test-document") assert_doc_equals( { "found": True, "_index": "test-document", "_id": "42", "_source": {"description": "testing"}, }, await async_write_client.get(index="test-document", id=42), ) @pytest.mark.asyncio async def test_save_without_skip_empty_will_include_empty_fields( async_write_client: AsyncElasticsearch, ) -> None: test_repo = Repository( field_1=[], field_2=None, field_3={}, owner={"name": None}, meta={"id": 42} ) assert await test_repo.save(index="test-document", skip_empty=False) assert_doc_equals( { "found": True, "_index": "test-document", "_id": "42", "_source": { "field_1": [], "field_2": None, "field_3": {}, "owner": {"name": None}, }, }, await async_write_client.get(index="test-document", id=42), ) @pytest.mark.asyncio async def test_delete(async_write_client: AsyncElasticsearch) -> None: await async_write_client.create( index="test-document", id="elasticsearch-dsl-py", body={ "organization": "elasticsearch", "created_at": "2014-03-03", "owner": {"name": "elasticsearch"}, }, ) test_repo = Repository(meta={"id": "elasticsearch-dsl-py"}) test_repo.meta.index = "test-document" await test_repo.delete() assert not await async_write_client.exists( index="test-document", id="elasticsearch-dsl-py", ) @pytest.mark.asyncio async def test_search(async_data_client: AsyncElasticsearch) -> None: assert await Repository.search().count() == 1 @pytest.mark.asyncio async def test_search_returns_proper_doc_classes( async_data_client: AsyncElasticsearch, ) -> None: result = await Repository.search().execute() elasticsearch_repo = result.hits[0] assert isinstance(elasticsearch_repo, Repository) assert elasticsearch_repo.owner.name == "elasticsearch" @pytest.mark.asyncio async def test_refresh_mapping(async_data_client: AsyncElasticsearch) -> None: class Commit(AsyncDocument): class Index: name = "git" await Commit._index.load_mappings() assert "stats" in Commit._index._mapping assert "committer" in Commit._index._mapping assert "description" in Commit._index._mapping assert "committed_date" in Commit._index._mapping assert isinstance(Commit._index._mapping["committed_date"], Date) @pytest.mark.asyncio async def test_highlight_in_meta(async_data_client: AsyncElasticsearch) -> None: commit = ( await Commit.search() .query("match", description="inverting") .highlight("description") .execute() )[0] assert isinstance(commit, Commit) assert 
"description" in commit.meta.highlight assert isinstance(commit.meta.highlight["description"], AttrList) assert len(commit.meta.highlight["description"]) > 0 @pytest.mark.asyncio async def test_bulk(async_data_client: AsyncElasticsearch) -> None: class Address(InnerDoc): street: str active: bool class Doc(AsyncDocument): if TYPE_CHECKING: _id: int name: str age: int languages: List[str] = mapped_field(Keyword()) addresses: List[Address] class Index: name = "bulk-index" await Doc._index.delete(ignore_unavailable=True) await Doc.init() async def gen1() -> AsyncIterator[Union[Doc, Dict[str, Any]]]: yield Doc( name="Joe", age=33, languages=["en", "fr"], addresses=[ Address(street="123 Main St", active=True), Address(street="321 Park Dr.", active=False), ], ) yield Doc(name="Susan", age=20, languages=["en"]) yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} await Doc.bulk(gen1(), refresh=True) docs = list(await Doc.search().execute()) assert len(docs) == 3 assert docs[0].to_dict() == { "name": "Joe", "age": 33, "languages": [ "en", "fr", ], "addresses": [ { "active": True, "street": "123 Main St", }, { "active": False, "street": "321 Park Dr.", }, ], } assert docs[1].to_dict() == { "name": "Susan", "age": 20, "languages": ["en"], } assert docs[2].to_dict() == { "name": "Sarah", "age": 45, } assert docs[2].meta.id == "45" async def gen2() -> AsyncIterator[Union[Doc, Dict[str, Any]]]: yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} # a "create" action with an existing id should fail with raises(BulkIndexError): await Doc.bulk(gen2(), refresh=True) async def gen3() -> AsyncIterator[Union[Doc, Dict[str, Any]]]: yield Doc(_id="45", name="Sarah", age=45, languages=["es"]) yield {"_op_type": "delete", "_id": docs[1].meta.id} await Doc.bulk(gen3(), refresh=True) with raises(NotFoundError): await Doc.get(docs[1].meta.id) doc = await Doc.get("45") assert doc is not None assert (doc).to_dict() == { "name": "Sarah", "age": 45, "languages": ["es"], } @pytest.mark.asyncio async def test_legacy_dense_vector( async_client: AsyncElasticsearch, es_version: Tuple[int, ...] ) -> None: if es_version >= (8, 16): pytest.skip("this test is a legacy version for Elasticsearch 8.15 or older") class Doc(AsyncDocument): float_vector: List[float] = mapped_field(DenseVector(dims=3)) class Index: name = "vectors" await Doc._index.delete(ignore_unavailable=True) await Doc.init() doc = Doc(float_vector=[1.0, 1.2, 2.3]) await doc.save(refresh=True) docs = await Doc.search().execute() assert len(docs) == 1 assert docs[0].float_vector == doc.float_vector @pytest.mark.asyncio async def test_dense_vector( async_client: AsyncElasticsearch, es_version: Tuple[int, ...] 
) -> None: if es_version < (8, 16): pytest.skip("this test requires Elasticsearch 8.16 or newer") class Doc(AsyncDocument): float_vector: List[float] = mapped_field(DenseVector()) byte_vector: List[int] = mapped_field(DenseVector(element_type="byte")) bit_vector: str = mapped_field(DenseVector(element_type="bit")) class Index: name = "vectors" await Doc._index.delete(ignore_unavailable=True) await Doc.init() doc = Doc( float_vector=[1.0, 1.2, 2.3], byte_vector=[12, 23, 34, 45], bit_vector="12abf0" ) await doc.save(refresh=True) docs = await Doc.search().execute() assert len(docs) == 1 assert docs[0].float_vector == doc.float_vector assert docs[0].byte_vector == doc.byte_vector assert docs[0].bit_vector == doc.bit_vector @pytest.mark.asyncio async def test_copy_to(async_client: AsyncElasticsearch) -> None: class Person(AsyncDocument): first_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) last_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) birth_place: M[str] = mapped_field(Text(copy_to="all")) full_name: M[Optional[str]] = mapped_field(init=False) all: M[Optional[str]] = mapped_field(init=False) class Index: name = "people" await Person._index.delete(ignore_unavailable=True) await Person.init() person = Person(first_name="Jane", last_name="Doe", birth_place="Springfield") await person.save() await Person._index.refresh() match = ( await Person.search() .query(Match(Person.full_name, MatchQuery(query="Jane"))) .execute() ) assert len(match) == 1 match = ( await Person.search() .query(Match(Person.all, MatchQuery(query="Doe"))) .execute() ) assert len(match) == 1 match = ( await Person.search() .query(Match(Person.full_name, MatchQuery(query="Springfield"))) .execute() ) assert len(match) == 0 match = ( await Person.search() .query(Match(Person.all, MatchQuery(query="Springfield"))) .execute() ) assert len(match) == 1 python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/test_esql.py000066400000000000000000000166361506101734100325720ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
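# Illustrative sketch (added for exposition, not part of the original suite):
# the ES|QL builder chains pipe commands and renders the final query with
# str(); the exact whitespace of the rendered form is an assumption here,
# so only substrings are checked.
def _example_esql_render_sketch() -> None:
    from elasticsearch.esql import ESQL

    query = ESQL.from_("employees").keep("first_name", "last_name").limit(5)
    rendered = str(query)
    assert "FROM employees" in rendered
    assert "LIMIT 5" in rendered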
import pytest from elasticsearch.dsl import AsyncDocument, InnerDoc, M from elasticsearch.esql import ESQL, E, functions class Address(InnerDoc): address: M[str] city: M[str] class Employee(AsyncDocument): emp_no: M[int] first_name: M[str] last_name: M[str] height: M[float] still_hired: M[bool] address: M[Address] class Index: name = "employees" async def load_db(): data = [ [ 10000, "Joseph", "Wall", 2.2, True, Address(address="8875 Long Shoals Suite 441", city="Marcville, TX"), ], [ 10001, "Stephanie", "Ward", 1.749, True, Address(address="90162 Carter Harbor Suite 099", city="Davisborough, DE"), ], [ 10002, "David", "Keller", 1.872, True, Address(address="6697 Patrick Union Suite 797", city="Fuentesmouth, SD"), ], [ 10003, "Roger", "Hinton", 1.694, False, Address(address="809 Kelly Mountains", city="South Megan, DE"), ], [ 10004, "Joshua", "Garcia", 1.661, False, Address(address="718 Angela Forks", city="Port Erinland, MA"), ], [ 10005, "Matthew", "Richards", 1.633, False, Address(address="2869 Brown Mountains", city="New Debra, NH"), ], [ 10006, "Maria", "Luna", 1.893, True, Address(address="5861 Morgan Springs", city="Lake Daniel, WI"), ], [ 10007, "Angela", "Navarro", 1.604, False, Address(address="2848 Allen Station", city="Saint Joseph, OR"), ], [ 10008, "Maria", "Cannon", 2.079, False, Address(address="322 NW Johnston", city="Bakerburgh, MP"), ], [ 10009, "Joseph", "Sutton", 2.025, True, Address(address="77 Cardinal E", city="Lakestown, IL"), ], ] if await Employee._index.exists(): await Employee._index.delete() await Employee.init() for e in data: employee = Employee( emp_no=e[0], first_name=e[1], last_name=e[2], height=e[3], still_hired=e[4], address=e[5], ) await employee.save() await Employee._index.refresh() @pytest.mark.asyncio async def test_esql(async_client): await load_db() # get the full names of the employees query = ( ESQL.from_(Employee) .eval(full_name=functions.concat(Employee.first_name, " ", Employee.last_name)) .keep("full_name") .sort("full_name") .limit(10) ) r = await async_client.esql.query(query=str(query)) assert r.body["values"] == [ ["Angela Navarro"], ["David Keller"], ["Joseph Sutton"], ["Joseph Wall"], ["Joshua Garcia"], ["Maria Cannon"], ["Maria Luna"], ["Matthew Richards"], ["Roger Hinton"], ["Stephanie Ward"], ] # get the average height of all hired employees query = ESQL.from_(Employee).stats( avg_height=functions.round(functions.avg(Employee.height), 2).where( Employee.still_hired == True # noqa: E712 ) ) r = await async_client.esql.query(query=str(query)) assert r.body["values"] == [[1.95]] # find employees by name using a parameter query = ( ESQL.from_(Employee) .where(Employee.first_name == E("?")) .keep(Employee.last_name) .sort(Employee.last_name.desc()) ) r = await async_client.esql.query(query=str(query), params=["Maria"]) assert r.body["values"] == [["Luna"], ["Cannon"]] @pytest.mark.asyncio async def test_esql_dsl(async_client): await load_db() # get employees with first name "Maria" query = ( Employee.esql_from() .where(Employee.first_name == "Maria") .sort("last_name") .limit(10) ) marias = [] async for emp in Employee.esql_execute(query): marias.append(emp) assert len(marias) == 2 assert marias[0].last_name == "Cannon" assert marias[0].address.address == "322 NW Johnston" assert marias[0].address.city == "Bakerburgh, MP" assert marias[1].last_name == "Luna" assert marias[1].address.address == "5861 Morgan Springs" assert marias[1].address.city == "Lake Daniel, WI" # run a query with a missing field query = ( Employee.esql_from() 
.where(Employee.first_name == "Maria") .drop(Employee.address.city) .sort("last_name") .limit(10) ) with pytest.raises(ValueError): await Employee.esql_execute(query).__anext__() marias = [] async for emp in Employee.esql_execute(query, ignore_missing_fields=True): marias.append(emp) assert marias[0].last_name == "Cannon" assert marias[0].address.address == "322 NW Johnston" assert marias[0].address.city is None assert marias[1].last_name == "Luna" assert marias[1].address.address == "5861 Morgan Springs" assert marias[1].address.city is None # run a query with additional calculated fields query = ( Employee.esql_from() .where(Employee.first_name == "Maria") .eval( full_name=functions.concat(Employee.first_name, " ", Employee.last_name), height_cm=functions.to_integer(Employee.height * 100), ) .sort("last_name") .limit(10) ) assert isinstance(await Employee.esql_execute(query).__anext__(), Employee) assert isinstance( await Employee.esql_execute(query, return_additional=True).__anext__(), tuple ) marias = [] async for emp, extra in Employee.esql_execute(query, return_additional=True): marias.append([emp, extra]) assert marias[0][0].last_name == "Cannon" assert marias[0][0].address.address == "322 NW Johnston" assert marias[0][0].address.city == "Bakerburgh, MP" assert marias[0][1] == {"full_name": "Maria Cannon", "height_cm": 208} assert marias[1][0].last_name == "Luna" assert marias[1][0].address.address == "5861 Morgan Springs" assert marias[1][0].address.city == "Lake Daniel, WI" assert marias[1][1] == {"full_name": "Maria Luna", "height_cm": 189} test_faceted_search.py000066400000000000000000000231131506101734100344530ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
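# Editor's note: an illustrative sketch, not part of the original tests. The
# fixtures below all follow the same faceted-search pattern: a subclass of
# AsyncFacetedSearch declares an index, the text fields to query, and named
# facets, and after execute() each facet on the response is a list of
# (value, count, selected) tuples. The class and coroutine names here are
# hypothetical and never used by the tests.
def _faceted_search_sketch():
    from elasticsearch.dsl.faceted_search import AsyncFacetedSearch, TermsFacet

    class FileSearch(AsyncFacetedSearch):
        index = "flat-git"  # same test index used throughout this module
        fields = ("description", "files")
        facets = {"files": TermsFacet(field="files")}

    async def run() -> None:
        r = await FileSearch().execute()
        for value, count, selected in r.facets.files:
            print(value, count, selected)

    return run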
from datetime import datetime from typing import Tuple, Type import pytest from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import A, AsyncDocument, AsyncSearch, Boolean, Date, Keyword from elasticsearch.dsl.faceted_search import ( AsyncFacetedSearch, DateHistogramFacet, NestedFacet, RangeFacet, TermsFacet, ) from .test_document import PullRequest class Repos(AsyncDocument): is_public = Boolean() created_at = Date() class Index: name = "git" class Commit(AsyncDocument): files = Keyword() committed_date = Date() class Index: name = "git" class MetricSearch(AsyncFacetedSearch): index = "git" doc_types = [Commit] facets = { "files": TermsFacet(field="files", metric=A("max", field="committed_date")), } @pytest.fixture def commit_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]: if es_version >= (7, 2): interval_kwargs = {"fixed_interval": "1d"} else: interval_kwargs = {"interval": "day"} class CommitSearch(AsyncFacetedSearch): index = "flat-git" fields = ( "description", "files", ) facets = { "files": TermsFacet(field="files"), "frequency": DateHistogramFacet( field="authored_date", min_doc_count=1, **interval_kwargs ), "deletions": RangeFacet( field="stats.deletions", ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))], ), } return CommitSearch @pytest.fixture def repo_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]: interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" class RepoSearch(AsyncFacetedSearch): index = "git" doc_types = [Repos] facets = { "public": TermsFacet(field="is_public"), "created": DateHistogramFacet( field="created_at", **{interval_type: "month"} ), } def search(self) -> AsyncSearch: s = super().search() return s.filter("term", commit_repo="repo") return RepoSearch @pytest.fixture def pr_search_cls(es_version: Tuple[int, ...]) -> Type[AsyncFacetedSearch]: interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" class PRSearch(AsyncFacetedSearch): index = "test-prs" doc_types = [PullRequest] facets = { "comments": NestedFacet( "comments", DateHistogramFacet( field="comments.created_at", **{interval_type: "month"} ), ) } return PRSearch @pytest.mark.asyncio async def test_facet_with_custom_metric(async_data_client: AsyncElasticsearch) -> None: ms = MetricSearch() r = await ms.execute() dates = [f[1] for f in r.facets.files] assert dates == list(sorted(dates, reverse=True)) assert dates[0] == 1399038439000 @pytest.mark.asyncio async def test_nested_facet( async_pull_request: PullRequest, pr_search_cls: Type[AsyncFacetedSearch] ) -> None: prs = pr_search_cls() r = await prs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(datetime(2018, 1, 1, 0, 0), 1, False)] == r.facets.comments @pytest.mark.asyncio async def test_nested_facet_with_filter( async_pull_request: PullRequest, pr_search_cls: Type[AsyncFacetedSearch] ) -> None: prs = pr_search_cls(filters={"comments": datetime(2018, 1, 1, 0, 0)}) r = await prs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(datetime(2018, 1, 1, 0, 0), 1, True)] == r.facets.comments prs = pr_search_cls(filters={"comments": datetime(2018, 2, 1, 0, 0)}) r = await prs.execute() assert not r.hits @pytest.mark.asyncio async def test_datehistogram_facet( async_data_client: AsyncElasticsearch, repo_search_cls: Type[AsyncFacetedSearch] ) -> None: rs = repo_search_cls() r = await rs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(datetime(2014, 3, 1, 
0, 0), 1, False)] == r.facets.created @pytest.mark.asyncio async def test_boolean_facet( async_data_client: AsyncElasticsearch, repo_search_cls: Type[AsyncFacetedSearch] ) -> None: rs = repo_search_cls() r = await rs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(True, 1, False)] == r.facets.public value, count, selected = r.facets.public[0] assert value is True @pytest.mark.asyncio async def test_empty_search_finds_everything( async_data_client: AsyncElasticsearch, es_version: Tuple[int, ...], commit_search_cls: Type[AsyncFacetedSearch], ) -> None: cs = commit_search_cls() r = await cs.execute() assert r.hits.total.value == 52 # type: ignore[attr-defined] assert [ ("elasticsearch_dsl", 40, False), ("test_elasticsearch_dsl", 35, False), ("elasticsearch_dsl/query.py", 19, False), ("test_elasticsearch_dsl/test_search.py", 15, False), ("elasticsearch_dsl/utils.py", 14, False), ("test_elasticsearch_dsl/test_query.py", 13, False), ("elasticsearch_dsl/search.py", 12, False), ("elasticsearch_dsl/aggs.py", 11, False), ("test_elasticsearch_dsl/test_result.py", 5, False), ("elasticsearch_dsl/result.py", 3, False), ] == r.facets.files assert [ (datetime(2014, 3, 3, 0, 0), 2, False), (datetime(2014, 3, 4, 0, 0), 1, False), (datetime(2014, 3, 5, 0, 0), 3, False), (datetime(2014, 3, 6, 0, 0), 3, False), (datetime(2014, 3, 7, 0, 0), 9, False), (datetime(2014, 3, 10, 0, 0), 2, False), (datetime(2014, 3, 15, 0, 0), 4, False), (datetime(2014, 3, 21, 0, 0), 2, False), (datetime(2014, 3, 23, 0, 0), 2, False), (datetime(2014, 3, 24, 0, 0), 10, False), (datetime(2014, 4, 20, 0, 0), 2, False), (datetime(2014, 4, 22, 0, 0), 2, False), (datetime(2014, 4, 25, 0, 0), 3, False), (datetime(2014, 4, 26, 0, 0), 2, False), (datetime(2014, 4, 27, 0, 0), 2, False), (datetime(2014, 5, 1, 0, 0), 2, False), (datetime(2014, 5, 2, 0, 0), 1, False), ] == r.facets.frequency assert [ ("ok", 19, False), ("good", 14, False), ("better", 19, False), ] == r.facets.deletions @pytest.mark.asyncio async def test_term_filters_are_shown_as_selected_and_data_is_filtered( async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch] ) -> None: cs = commit_search_cls(filters={"files": "test_elasticsearch_dsl"}) r = await cs.execute() assert 35 == r.hits.total.value # type: ignore[attr-defined] assert [ ("elasticsearch_dsl", 40, False), ("test_elasticsearch_dsl", 35, True), # selected ("elasticsearch_dsl/query.py", 19, False), ("test_elasticsearch_dsl/test_search.py", 15, False), ("elasticsearch_dsl/utils.py", 14, False), ("test_elasticsearch_dsl/test_query.py", 13, False), ("elasticsearch_dsl/search.py", 12, False), ("elasticsearch_dsl/aggs.py", 11, False), ("test_elasticsearch_dsl/test_result.py", 5, False), ("elasticsearch_dsl/result.py", 3, False), ] == r.facets.files assert [ (datetime(2014, 3, 3, 0, 0), 1, False), (datetime(2014, 3, 5, 0, 0), 2, False), (datetime(2014, 3, 6, 0, 0), 3, False), (datetime(2014, 3, 7, 0, 0), 6, False), (datetime(2014, 3, 10, 0, 0), 1, False), (datetime(2014, 3, 15, 0, 0), 3, False), (datetime(2014, 3, 21, 0, 0), 2, False), (datetime(2014, 3, 23, 0, 0), 1, False), (datetime(2014, 3, 24, 0, 0), 7, False), (datetime(2014, 4, 20, 0, 0), 1, False), (datetime(2014, 4, 25, 0, 0), 3, False), (datetime(2014, 4, 26, 0, 0), 2, False), (datetime(2014, 4, 27, 0, 0), 1, False), (datetime(2014, 5, 1, 0, 0), 1, False), (datetime(2014, 5, 2, 0, 0), 1, False), ] == r.facets.frequency assert [ ("ok", 12, False), ("good", 10, False), ("better", 13, False), ] == 
r.facets.deletions @pytest.mark.asyncio async def test_range_filters_are_shown_as_selected_and_data_is_filtered( async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch] ) -> None: cs = commit_search_cls(filters={"deletions": "better"}) r = await cs.execute() assert 19 == r.hits.total.value # type: ignore[attr-defined] @pytest.mark.asyncio async def test_pagination( async_data_client: AsyncElasticsearch, commit_search_cls: Type[AsyncFacetedSearch] ) -> None: cs = commit_search_cls() cs = cs[0:20] assert 52 == await cs.count() assert 20 == len(await cs.execute()) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/test_index.py000066400000000000000000000121731506101734100327250ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import ( AsyncComposableIndexTemplate, AsyncDocument, AsyncIndex, AsyncIndexTemplate, Date, Text, analysis, ) class Post(AsyncDocument): title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")) published_from = Date() @pytest.mark.asyncio async def test_index_template_works(async_write_client: AsyncElasticsearch) -> None: it = AsyncIndexTemplate("test-template", "test-legacy-*") it.document(Post) it.settings(number_of_replicas=0, number_of_shards=1) await it.save() i = AsyncIndex("test-legacy-blog") await i.create() assert { "test-legacy-blog": { "mappings": { "properties": { "title": {"type": "text", "analyzer": "my_analyzer"}, "published_from": {"type": "date"}, } } } } == await async_write_client.indices.get_mapping(index="test-legacy-blog") @pytest.mark.asyncio async def test_composable_index_template_works( async_write_client: AsyncElasticsearch, ) -> None: it = AsyncComposableIndexTemplate("test-template", "test-*") it.document(Post) it.settings(number_of_replicas=0, number_of_shards=1) await it.save() i = AsyncIndex("test-blog") await i.create() assert { "test-blog": { "mappings": { "properties": { "title": {"type": "text", "analyzer": "my_analyzer"}, "published_from": {"type": "date"}, } } } } == await async_write_client.indices.get_mapping(index="test-blog") @pytest.mark.asyncio async def test_index_can_be_saved_even_with_settings( async_write_client: AsyncElasticsearch, ) -> None: i = AsyncIndex("test-blog", using=async_write_client) i.settings(number_of_shards=3, number_of_replicas=0) await i.save() i.settings(number_of_replicas=1) await i.save() assert ( "1" == (await i.get_settings())["test-blog"]["settings"]["index"][ "number_of_replicas" ] ) @pytest.mark.asyncio async def test_index_exists(async_data_client: AsyncElasticsearch) -> None: assert await AsyncIndex("git").exists() assert not await AsyncIndex("not-there").exists() @pytest.mark.asyncio async def 
test_index_can_be_created_with_settings_and_mappings( async_write_client: AsyncElasticsearch, ) -> None: i = AsyncIndex("test-blog", using=async_write_client) i.document(Post) i.settings(number_of_replicas=0, number_of_shards=1) await i.create() assert { "test-blog": { "mappings": { "properties": { "title": {"type": "text", "analyzer": "my_analyzer"}, "published_from": {"type": "date"}, } } } } == await async_write_client.indices.get_mapping(index="test-blog") settings = await async_write_client.indices.get_settings(index="test-blog") assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0" assert settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1" assert settings["test-blog"]["settings"]["index"]["analysis"] == { "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} } @pytest.mark.asyncio async def test_delete(async_write_client: AsyncElasticsearch) -> None: await async_write_client.indices.create( index="test-index", body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}}, ) i = AsyncIndex("test-index", using=async_write_client) await i.delete() assert not await async_write_client.indices.exists(index="test-index") @pytest.mark.asyncio async def test_multiple_indices_with_same_doc_type_work( async_write_client: AsyncElasticsearch, ) -> None: i1 = AsyncIndex("test-index-1", using=async_write_client) i2 = AsyncIndex("test-index-2", using=async_write_client) for i in (i1, i2): i.document(Post) await i.create() for j in ("test-index-1", "test-index-2"): settings = await async_write_client.indices.get_settings(index=j) assert settings[j]["settings"]["index"]["analysis"] == { "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} } python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/test_mapping.py000066400000000000000000000134371506101734100332550ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
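# Editor's note: an illustrative sketch, not part of the original tests,
# summarizing the mapping round-trip the tests below exercise: fields are
# declared on an AsyncMapping, saved to an index, and read back with from_es.
# The coroutine name and "sketch-index" are hypothetical.
async def _mapping_roundtrip_sketch(client) -> None:
    from elasticsearch.dsl import AsyncMapping

    m = AsyncMapping()
    m.field("name", "text")  # field name followed by its type
    m.field("tags", "keyword")
    await m.save("sketch-index", using=client)  # persists the mapping to the index
    m2 = await AsyncMapping.from_es("sketch-index", using=client)
    assert m2.to_dict() == m.to_dict()  # the round-trip preserves the definition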
import pytest from pytest import raises from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import AsyncMapping, analysis, exceptions @pytest.mark.asyncio async def test_mapping_saved_into_es(async_write_client: AsyncElasticsearch) -> None: m = AsyncMapping() m.field( "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") ) m.field("tags", "keyword") await m.save("test-mapping", using=async_write_client) assert { "test-mapping": { "mappings": { "properties": { "name": {"type": "text", "analyzer": "my_analyzer"}, "tags": {"type": "keyword"}, } } } } == await async_write_client.indices.get_mapping(index="test-mapping") @pytest.mark.asyncio async def test_mapping_saved_into_es_when_index_already_exists_closed( async_write_client: AsyncElasticsearch, ) -> None: m = AsyncMapping() m.field( "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") ) await async_write_client.indices.create(index="test-mapping") with raises(exceptions.IllegalOperation): await m.save("test-mapping", using=async_write_client) await async_write_client.cluster.health( index="test-mapping", wait_for_status="yellow" ) await async_write_client.indices.close(index="test-mapping") await m.save("test-mapping", using=async_write_client) assert { "test-mapping": { "mappings": { "properties": {"name": {"type": "text", "analyzer": "my_analyzer"}} } } } == await async_write_client.indices.get_mapping(index="test-mapping") @pytest.mark.asyncio async def test_mapping_saved_into_es_when_index_already_exists_with_analysis( async_write_client: AsyncElasticsearch, ) -> None: m = AsyncMapping() analyzer = analysis.analyzer("my_analyzer", tokenizer="keyword") m.field("name", "text", analyzer=analyzer) new_analysis = analyzer.get_analysis_definition() new_analysis["analyzer"]["other_analyzer"] = { "type": "custom", "tokenizer": "whitespace", } await async_write_client.indices.create( index="test-mapping", body={"settings": {"analysis": new_analysis}} ) m.field("title", "text", analyzer=analyzer) await m.save("test-mapping", using=async_write_client) assert { "test-mapping": { "mappings": { "properties": { "name": {"type": "text", "analyzer": "my_analyzer"}, "title": {"type": "text", "analyzer": "my_analyzer"}, } } } } == await async_write_client.indices.get_mapping(index="test-mapping") @pytest.mark.asyncio async def test_mapping_gets_updated_from_es( async_write_client: AsyncElasticsearch, ) -> None: await async_write_client.indices.create( index="test-mapping", body={ "settings": {"number_of_shards": 1, "number_of_replicas": 0}, "mappings": { "date_detection": False, "properties": { "title": { "type": "text", "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, }, "created_at": {"type": "date"}, "comments": { "type": "nested", "properties": { "created": {"type": "date"}, "author": { "type": "text", "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, }, }, }, }, }, }, ) m = await AsyncMapping.from_es("test-mapping", using=async_write_client) assert ["comments", "created_at", "title"] == list( sorted(m.properties.properties._d_.keys()) # type: ignore[attr-defined] ) assert { "date_detection": False, "properties": { "comments": { "type": "nested", "properties": { "created": {"type": "date"}, "author": { "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, "type": "text", }, }, }, "created_at": {"type": "date"}, "title": { "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, "type": "text", }, }, } == m.to_dict() # test same with alias 
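    # (put_alias points "test-alias" at the index above; from_es accepts the
    # alias name and Elasticsearch resolves it to the backing index, so the
    # mapping retrieved through the alias must match the one fetched directly)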
await async_write_client.indices.put_alias(index="test-mapping", name="test-alias") m2 = await AsyncMapping.from_es("test-alias", using=async_write_client) assert m2.to_dict() == m.to_dict() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async/test_search.py000066400000000000000000000215511506101734100330630ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from pytest import raises from elasticsearch import ApiError, AsyncElasticsearch from elasticsearch.dsl import ( AsyncDocument, AsyncMultiSearch, AsyncSearch, Date, Keyword, Q, Text, ) from elasticsearch.dsl.response import aggs from ..test_data import FLAT_DATA class Repository(AsyncDocument): created_at = Date() description = Text(analyzer="snowball") tags = Keyword() @classmethod def search(cls) -> AsyncSearch["Repository"]: # type: ignore[override] return super().search().filter("term", commit_repo="repo") class Index: name = "git" class Commit(AsyncDocument): class Index: name = "flat-git" @pytest.mark.asyncio async def test_filters_aggregation_buckets_are_accessible( async_data_client: AsyncElasticsearch, ) -> None: has_tests_query = Q("term", files="test_elasticsearch_dsl") s = Commit.search()[0:0] s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket( "has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query} ).metric("lines", "stats", field="stats.lines") response = await s.execute() assert isinstance( response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket ) assert ( 35 == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count ) assert ( 228 == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max ) @pytest.mark.asyncio async def test_top_hits_are_wrapped_in_response( async_data_client: AsyncElasticsearch, ) -> None: s = Commit.search()[0:0] s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric( "top_commits", "top_hits", size=5 ) response = await s.execute() top_commits = response.aggregations.top_authors.buckets[0].top_commits assert isinstance(top_commits, aggs.TopHitsData) assert 5 == len(top_commits) hits = [h for h in top_commits] assert 5 == len(hits) assert isinstance(hits[0], Commit) @pytest.mark.asyncio async def test_inner_hits_are_wrapped_in_response( async_data_client: AsyncElasticsearch, ) -> None: s = AsyncSearch(index="git")[0:1].query( "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all") ) response = await s.execute() commit = response.hits[0] assert isinstance(commit.meta.inner_hits.repo, response.__class__) assert repr(commit.meta.inner_hits.repo[0]).startswith( " None: s = AsyncSearch(index="git")[0:1].query( "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all") ) response = 
await s.execute() d = response.to_dict(recursive=True) assert isinstance(d, dict) assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict) # iterating over the results changes the format of the internal AttrDict for hit in response: pass d = response.to_dict(recursive=True) assert isinstance(d, dict) assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict) @pytest.mark.asyncio async def test_scan_respects_doc_types(async_data_client: AsyncElasticsearch) -> None: repos = [repo async for repo in Repository.search().scan()] assert 1 == len(repos) assert isinstance(repos[0], Repository) assert repos[0].organization == "elasticsearch" @pytest.mark.asyncio async def test_scan_iterates_through_all_docs( async_data_client: AsyncElasticsearch, ) -> None: s = AsyncSearch(index="flat-git") commits = [commit async for commit in s.scan()] assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.asyncio async def test_search_after(async_data_client: AsyncElasticsearch) -> None: page_size = 7 s = AsyncSearch(index="flat-git")[:page_size].sort("authored_date") commits = [] while True: r = await s.execute() commits += r.hits if len(r.hits) < page_size: break s = s.search_after() assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.asyncio async def test_search_after_no_search(async_data_client: AsyncElasticsearch) -> None: s = AsyncSearch(index="flat-git") with raises( ValueError, match="A search must be executed before using search_after" ): s.search_after() await s.count() with raises( ValueError, match="A search must be executed before using search_after" ): s.search_after() @pytest.mark.asyncio async def test_search_after_no_sort(async_data_client: AsyncElasticsearch) -> None: s = AsyncSearch(index="flat-git") r = await s.execute() with raises( ValueError, match="Cannot use search_after when results are not sorted" ): r.search_after() @pytest.mark.asyncio async def test_search_after_no_results(async_data_client: AsyncElasticsearch) -> None: s = AsyncSearch(index="flat-git")[:100].sort("authored_date") r = await s.execute() assert 52 == len(r.hits) s = s.search_after() r = await s.execute() assert 0 == len(r.hits) with raises( ValueError, match="Cannot use search_after when there are no search results" ): r.search_after() @pytest.mark.asyncio async def test_point_in_time(async_data_client: AsyncElasticsearch) -> None: page_size = 7 commits = [] async with AsyncSearch(index="flat-git")[:page_size].point_in_time( keep_alive="30s" ) as s: pit_id = s._extra["pit"]["id"] while True: r = await s.execute() commits += r.hits if len(r.hits) < page_size: break s = s.search_after() assert pit_id == s._extra["pit"]["id"] assert "30s" == s._extra["pit"]["keep_alive"] assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.asyncio async def test_iterate(async_data_client: AsyncElasticsearch) -> None: s = AsyncSearch(index="flat-git") commits = [commit async for commit in s.iterate()] assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.asyncio async def test_response_is_cached(async_data_client: AsyncElasticsearch) -> None: s = Repository.search() repos = [repo async for repo in s] assert hasattr(s, "_response") assert s._response.hits == repos @pytest.mark.asyncio async def test_multi_search(async_data_client: AsyncElasticsearch) -> None: s1 = Repository.search() s2 = 
AsyncSearch[Repository](index="flat-git") ms = AsyncMultiSearch[Repository]() ms = ms.add(s1).add(s2) r1, r2 = await ms.execute() assert 1 == len(r1) assert isinstance(r1[0], Repository) assert r1._search is s1 assert 52 == r2.hits.total.value # type: ignore[attr-defined] assert r2._search is s2 @pytest.mark.asyncio async def test_multi_missing(async_data_client: AsyncElasticsearch) -> None: s1 = Repository.search() s2 = AsyncSearch[Repository](index="flat-git") s3 = AsyncSearch[Repository](index="does_not_exist") ms = AsyncMultiSearch[Repository]() ms = ms.add(s1).add(s2).add(s3) with raises(ApiError): await ms.execute() r1, r2, r3 = await ms.execute(raise_on_error=False) assert 1 == len(r1) assert isinstance(r1[0], Repository) assert r1._search is s1 assert 52 == r2.hits.total.value # type: ignore[attr-defined] assert r2._search is s2 assert r3 is None @pytest.mark.asyncio async def test_raw_subfield_can_be_used_in_aggs( async_data_client: AsyncElasticsearch, ) -> None: s = AsyncSearch(index="git")[0:0] s.aggs.bucket("authors", "terms", field="author.name.raw", size=1) r = await s.execute() authors = r.aggregations.authors assert 1 == len(authors) assert {"key": "Honza Král", "doc_count": 52} == authors[0] test_update_by_query.py000066400000000000000000000051071506101734100347370ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
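# Editor's note: an illustrative sketch, not part of the original tests, of
# the update-by-query flow exercised below: the request is scoped to an
# index, narrowed with a filter, optionally given a script, and executed;
# the response then reports counters such as .total, .updated and .deleted.
# The coroutine name is hypothetical.
async def _update_by_query_sketch(client, index: str) -> None:
    from elasticsearch.dsl import AsyncUpdateByQuery
    from elasticsearch.dsl.search import Q

    ubq = (
        AsyncUpdateByQuery(using=client)
        .index(index)
        .filter(~Q("exists", field="is_public"))
        .script(source="ctx._source.is_public = false")
        .params(conflicts="proceed")  # proceed past version conflicts, as the tests do
    )
    response = await ubq.execute()
    assert response.success()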
import pytest from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import AsyncUpdateByQuery from elasticsearch.dsl.search import Q @pytest.mark.asyncio async def test_update_by_query_no_script( async_write_client: AsyncElasticsearch, setup_ubq_tests: str ) -> None: index = setup_ubq_tests ubq = ( AsyncUpdateByQuery(using=async_write_client) .index(index) .filter(~Q("exists", field="is_public")) ) response = await ubq.execute() assert response.total == 52 assert response["took"] > 0 assert not response.timed_out assert response.updated == 52 assert response.deleted == 0 assert response.took > 0 assert response.success() @pytest.mark.asyncio async def test_update_by_query_with_script( async_write_client: AsyncElasticsearch, setup_ubq_tests: str ) -> None: index = setup_ubq_tests ubq = ( AsyncUpdateByQuery(using=async_write_client) .index(index) .filter(~Q("exists", field="parent_shas")) .script(source="ctx._source.is_public = false") ) ubq = ubq.params(conflicts="proceed") response = await ubq.execute() assert response.total == 2 assert response.updated == 2 assert response.version_conflicts == 0 @pytest.mark.asyncio async def test_delete_by_query_with_script( async_write_client: AsyncElasticsearch, setup_ubq_tests: str ) -> None: index = setup_ubq_tests ubq = ( AsyncUpdateByQuery(using=async_write_client) .index(index) .filter(Q("match", parent_shas="1dd19210b5be92b960f7db6f66ae526288edccc3")) .script(source='ctx.op = "delete"') ) ubq = ubq.params(conflicts="proceed") response = await ubq.execute() assert response.total == 1 assert response.deleted == 1 assert response.success() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/000077500000000000000000000000001506101734100300405ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/__init__.py000066400000000000000000000014231506101734100321510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_analysis.py000066400000000000000000000035141506101734100332770ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import pytest

from elasticsearch import Elasticsearch
from elasticsearch.dsl import analyzer, token_filter, tokenizer


@pytest.mark.sync
def test_simulate_with_just__builtin_tokenizer(
    client: Elasticsearch,
) -> None:
    a = analyzer("my-analyzer", tokenizer="keyword")
    tokens = (a.simulate("Hello World!", using=client)).tokens

    assert len(tokens) == 1
    assert tokens[0].token == "Hello World!"


@pytest.mark.sync
def test_simulate_complex(client: Elasticsearch) -> None:
    a = analyzer(
        "my-analyzer",
        tokenizer=tokenizer("split_words", "simple_pattern_split", pattern=":"),
        filter=["lowercase", token_filter("no-ifs", "stop", stopwords=["if"])],
    )

    tokens = (a.simulate("if:this:works", using=client)).tokens

    assert len(tokens) == 2
    assert ["this", "works"] == [t.token for t in tokens]


@pytest.mark.sync
def test_simulate_builtin(client: Elasticsearch) -> None:
    a = analyzer("my-analyzer", "english")
    tokens = (a.simulate("fixes running")).tokens

    assert ["fix", "run"] == [t.token for t in tokens]
python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_document.py000066400000000000000000000612351506101734100332760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# this file creates several documents using bad or no types because
# these are still supported and should be kept functional in spite
# of not having appropriate type hints. For that reason the comment
# below disables many mypy checks that fail as a result of this.
# mypy: disable-error-code="assignment, index, arg-type, call-arg, operator, comparison-overlap, attr-defined" from datetime import datetime from ipaddress import ip_address from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Tuple, Union import pytest from pytest import raises from pytz import timezone from elasticsearch import ConflictError, Elasticsearch, NotFoundError from elasticsearch.dsl import ( Binary, Boolean, Date, DenseVector, Document, Double, InnerDoc, Ip, Keyword, Long, M, Mapping, MetaField, Nested, Object, Q, RankFeatures, Search, Text, analyzer, mapped_field, ) from elasticsearch.dsl.query import Match from elasticsearch.dsl.types import MatchQuery from elasticsearch.dsl.utils import AttrList from elasticsearch.helpers.errors import BulkIndexError snowball = analyzer("my_snow", tokenizer="standard", filter=["lowercase", "snowball"]) class User(InnerDoc): name = Text(fields={"raw": Keyword()}) class Wiki(Document): owner = Object(User) views = Long() ranked = RankFeatures() class Index: name = "test-wiki" class Repository(Document): owner = Object(User) created_at = Date() description = Text(analyzer=snowball) tags = Keyword() @classmethod def search(cls) -> Search["Repository"]: # type: ignore[override] return super().search().filter("term", commit_repo="repo") class Index: name = "git" class Commit(Document): committed_date = Date() authored_date = Date() description = Text(analyzer=snowball) class Index: name = "flat-git" class Meta: mapping = Mapping() class History(InnerDoc): timestamp = Date() diff = Text() class Comment(InnerDoc): content = Text() created_at = Date() author = Object(User) history = Nested(History) class Meta: dynamic = MetaField(False) class PullRequest(Document): comments = Nested(Comment) created_at = Date() class Index: name = "test-prs" class SerializationDoc(Document): i = Long() b = Boolean() d = Double() bin = Binary() ip = Ip() class Index: name = "test-serialization" class Tags(Document): tags = Keyword(multi=True) class Index: name = "tags" @pytest.mark.sync def test_serialization(write_client: Elasticsearch) -> None: SerializationDoc.init() write_client.index( index="test-serialization", id=42, body={ "i": [1, 2, "3", None], "b": [True, False, "true", "false", None], "d": [0.1, "-0.1", None], "bin": ["SGVsbG8gV29ybGQ=", None], "ip": ["::1", "127.0.0.1", None], }, ) sd = SerializationDoc.get(id=42) assert sd is not None assert sd.i == [1, 2, 3, None] assert sd.b == [True, False, True, False, None] assert sd.d == [0.1, -0.1, None] assert sd.bin == [b"Hello World", None] assert sd.ip == [ip_address("::1"), ip_address("127.0.0.1"), None] assert sd.to_dict() == { "b": [True, False, True, False, None], "bin": ["SGVsbG8gV29ybGQ=", None], "d": [0.1, -0.1, None], "i": [1, 2, 3, None], "ip": ["::1", "127.0.0.1", None], } @pytest.mark.sync def test_nested_inner_hits_are_wrapped_properly(pull_request: Any) -> None: history_query = Q( "nested", path="comments.history", inner_hits={}, query=Q("match", comments__history__diff="ahoj"), ) s = PullRequest.search().query( "nested", inner_hits={}, path="comments", query=history_query ) response = s.execute() pr = response.hits[0] assert isinstance(pr, PullRequest) assert isinstance(pr.comments[0], Comment) assert isinstance(pr.comments[0].history[0], History) comment = pr.meta.inner_hits.comments.hits[0] assert isinstance(comment, Comment) assert comment.author.name == "honzakral" assert isinstance(comment.history[0], History) history = comment.meta.inner_hits["comments.history"].hits[0] 
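    # nested inner hits are keyed by their full dotted path ("comments.history")
    # and each hit is deserialized into its InnerDoc class with its own
    # metadata, which is what the assertions below check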
assert isinstance(history, History) assert history.timestamp == datetime(2012, 1, 1) assert "score" in history.meta @pytest.mark.sync def test_nested_inner_hits_are_deserialized_properly( pull_request: Any, ) -> None: s = PullRequest.search().query( "nested", inner_hits={}, path="comments", query=Q("match", comments__content="hello"), ) response = s.execute() pr = response.hits[0] assert isinstance(pr.created_at, datetime) assert isinstance(pr.comments[0], Comment) assert isinstance(pr.comments[0].created_at, datetime) @pytest.mark.sync def test_nested_top_hits_are_wrapped_properly(pull_request: Any) -> None: s = PullRequest.search() s.aggs.bucket("comments", "nested", path="comments").metric( "hits", "top_hits", size=1 ) r = s.execute() print(r._d_) assert isinstance(r.aggregations.comments.hits.hits[0], Comment) @pytest.mark.sync def test_update_object_field(write_client: Elasticsearch) -> None: Wiki.init() w = Wiki( owner=User(name="Honza Kral"), _id="elasticsearch-py", ranked={"test1": 0.1, "topic2": 0.2}, ) w.save() assert "updated" == w.update(owner=[{"name": "Honza"}, User(name="Nick")]) assert w.owner[0].name == "Honza" assert w.owner[1].name == "Nick" w = Wiki.get(id="elasticsearch-py") assert w.owner[0].name == "Honza" assert w.owner[1].name == "Nick" assert w.ranked == {"test1": 0.1, "topic2": 0.2} @pytest.mark.sync def test_update_script(write_client: Elasticsearch) -> None: Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) w.save() w.update(script="ctx._source.views += params.inc", inc=5) w = Wiki.get(id="elasticsearch-py") assert w.views == 47 @pytest.mark.sync def test_update_script_with_dict(write_client: Elasticsearch) -> None: Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) w.save() w.update( script={ "source": "ctx._source.views += params.inc1 + params.inc2", "params": {"inc1": 2}, "lang": "painless", }, inc2=3, ) w = Wiki.get(id="elasticsearch-py") assert w.views == 47 @pytest.mark.sync def test_update_retry_on_conflict(write_client: Elasticsearch) -> None: Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) w.save() w1 = Wiki.get(id="elasticsearch-py") w2 = Wiki.get(id="elasticsearch-py") assert w1 is not None assert w2 is not None w1.update(script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1) w2.update(script="ctx._source.views += params.inc", inc=5, retry_on_conflict=1) w = Wiki.get(id="elasticsearch-py") assert w.views == 52 @pytest.mark.sync @pytest.mark.parametrize("retry_on_conflict", [None, 0]) def test_update_conflicting_version( write_client: Elasticsearch, retry_on_conflict: bool ) -> None: Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) w.save() w1 = Wiki.get(id="elasticsearch-py") w2 = Wiki.get(id="elasticsearch-py") assert w1 is not None assert w2 is not None w1.update(script="ctx._source.views += params.inc", inc=5) with raises(ConflictError): w2.update( script="ctx._source.views += params.inc", inc=5, retry_on_conflict=retry_on_conflict, ) @pytest.mark.sync def test_save_and_update_return_doc_meta( write_client: Elasticsearch, ) -> None: Wiki.init() w = Wiki(owner=User(name="Honza Kral"), _id="elasticsearch-py", views=42) resp = w.save(return_doc_meta=True) assert resp["_index"] == "test-wiki" assert resp["result"] == "created" assert set(resp.keys()) == { "_id", "_index", "_primary_term", "_seq_no", "_shards", "_version", "result", } resp = w.update( script="ctx._source.views += params.inc", 
inc=5, return_doc_meta=True ) assert resp["_index"] == "test-wiki" assert resp["result"] == "updated" assert set(resp.keys()) == { "_id", "_index", "_primary_term", "_seq_no", "_shards", "_version", "result", } @pytest.mark.sync def test_init(write_client: Elasticsearch) -> None: Repository.init(index="test-git") assert write_client.indices.exists(index="test-git") @pytest.mark.sync def test_get_raises_404_on_index_missing( data_client: Elasticsearch, ) -> None: with raises(NotFoundError): Repository.get("elasticsearch-dsl-php", index="not-there") @pytest.mark.sync def test_get_raises_404_on_non_existent_id( data_client: Elasticsearch, ) -> None: with raises(NotFoundError): Repository.get("elasticsearch-dsl-php") @pytest.mark.sync def test_get_returns_none_if_404_ignored( data_client: Elasticsearch, ) -> None: assert None is Repository.get( "elasticsearch-dsl-php", using=data_client.options(ignore_status=404) ) @pytest.mark.sync def test_get_returns_none_if_404_ignored_and_index_doesnt_exist( data_client: Elasticsearch, ) -> None: assert None is Repository.get( "42", index="not-there", using=data_client.options(ignore_status=404) ) @pytest.mark.sync def test_get(data_client: Elasticsearch) -> None: elasticsearch_repo = Repository.get("elasticsearch-dsl-py") assert isinstance(elasticsearch_repo, Repository) assert elasticsearch_repo.owner.name == "elasticsearch" assert datetime(2014, 3, 3) == elasticsearch_repo.created_at @pytest.mark.sync def test_exists_return_true(data_client: Elasticsearch) -> None: assert Repository.exists("elasticsearch-dsl-py") @pytest.mark.sync def test_exists_false(data_client: Elasticsearch) -> None: assert not Repository.exists("elasticsearch-dsl-php") @pytest.mark.sync def test_get_with_tz_date(data_client: Elasticsearch) -> None: first_commit = Commit.get( id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" ) assert first_commit is not None tzinfo = timezone("Europe/Prague") assert ( tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123000)) == first_commit.authored_date ) @pytest.mark.sync def test_save_with_tz_date(data_client: Elasticsearch) -> None: tzinfo = timezone("Europe/Prague") first_commit = Commit.get( id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" ) assert first_commit is not None first_commit.committed_date = tzinfo.localize( datetime(2014, 5, 2, 13, 47, 19, 123456) ) first_commit.save() first_commit = Commit.get( id="3ca6e1e73a071a705b4babd2f581c91a2a3e5037", routing="elasticsearch-dsl-py" ) assert first_commit is not None assert ( tzinfo.localize(datetime(2014, 5, 2, 13, 47, 19, 123456)) == first_commit.committed_date ) COMMIT_DOCS_WITH_MISSING = [ {"_id": "0"}, # Missing {"_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037"}, # Existing {"_id": "f"}, # Missing {"_id": "eb3e543323f189fd7b698e66295427204fff5755"}, # Existing ] @pytest.mark.sync def test_mget(data_client: Elasticsearch) -> None: commits = Commit.mget(COMMIT_DOCS_WITH_MISSING) assert commits[0] is None assert commits[1] is not None assert commits[1].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" assert commits[2] is None assert commits[3] is not None assert commits[3].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" @pytest.mark.sync def test_mget_raises_exception_when_missing_param_is_invalid( data_client: Elasticsearch, ) -> None: with raises(ValueError): Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raj") @pytest.mark.sync def test_mget_raises_404_when_missing_param_is_raise( data_client: Elasticsearch, ) -> None: 
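    # Document.get raises NotFoundError when the target index itself is missing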
with raises(NotFoundError): Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="raise") @pytest.mark.sync def test_mget_ignores_missing_docs_when_missing_param_is_skip( data_client: Elasticsearch, ) -> None: commits = Commit.mget(COMMIT_DOCS_WITH_MISSING, missing="skip") assert commits[0] is not None assert commits[0].meta.id == "3ca6e1e73a071a705b4babd2f581c91a2a3e5037" assert commits[1] is not None assert commits[1].meta.id == "eb3e543323f189fd7b698e66295427204fff5755" @pytest.mark.sync def test_update_works_from_search_response( data_client: Elasticsearch, ) -> None: elasticsearch_repo = (Repository.search().execute())[0] elasticsearch_repo.update(owner={"other_name": "elastic"}) assert "elastic" == elasticsearch_repo.owner.other_name new_version = Repository.get("elasticsearch-dsl-py") assert new_version is not None assert "elastic" == new_version.owner.other_name assert "elasticsearch" == new_version.owner.name @pytest.mark.sync def test_update(data_client: Elasticsearch) -> None: elasticsearch_repo = Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None v = elasticsearch_repo.meta.version old_seq_no = elasticsearch_repo.meta.seq_no elasticsearch_repo.update(owner={"new_name": "elastic"}, new_field="testing-update") assert "elastic" == elasticsearch_repo.owner.new_name assert "testing-update" == elasticsearch_repo.new_field # assert version has been updated assert elasticsearch_repo.meta.version == v + 1 new_version = Repository.get("elasticsearch-dsl-py") assert new_version is not None assert "testing-update" == new_version.new_field assert "elastic" == new_version.owner.new_name assert "elasticsearch" == new_version.owner.name assert "seq_no" in new_version.meta assert new_version.meta.seq_no != old_seq_no assert "primary_term" in new_version.meta @pytest.mark.sync def test_save_updates_existing_doc(data_client: Elasticsearch) -> None: elasticsearch_repo = Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None elasticsearch_repo.new_field = "testing-save" old_seq_no = elasticsearch_repo.meta.seq_no assert "updated" == elasticsearch_repo.save() new_repo = data_client.get(index="git", id="elasticsearch-dsl-py") assert "testing-save" == new_repo["_source"]["new_field"] assert new_repo["_seq_no"] != old_seq_no assert new_repo["_seq_no"] == elasticsearch_repo.meta.seq_no @pytest.mark.sync def test_update_empty_field(client: Elasticsearch) -> None: Tags._index.delete(ignore_unavailable=True) Tags.init() d = Tags(id="123", tags=["a", "b"]) d.save(refresh=True) d.update(tags=[], refresh=True) assert d.tags == [] r = Tags.search().execute() assert r.hits[0].tags == [] @pytest.mark.sync def test_save_automatically_uses_seq_no_and_primary_term( data_client: Elasticsearch, ) -> None: elasticsearch_repo = Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None elasticsearch_repo.meta.seq_no += 1 with raises(ConflictError): elasticsearch_repo.save() @pytest.mark.sync def test_delete_automatically_uses_seq_no_and_primary_term( data_client: Elasticsearch, ) -> None: elasticsearch_repo = Repository.get("elasticsearch-dsl-py") assert elasticsearch_repo is not None elasticsearch_repo.meta.seq_no += 1 with raises(ConflictError): elasticsearch_repo.delete() def assert_doc_equals(expected: Any, actual: Any) -> None: for f in expected: assert f in actual assert actual[f] == expected[f] @pytest.mark.sync def test_can_save_to_different_index( write_client: Elasticsearch, ) -> None: test_repo = Repository(description="testing", meta={"id": 42}) 
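    # passing index= to save() overrides the document's default Index.name
    # ("git") for this call only, as the lookup below confirms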
assert test_repo.save(index="test-document") assert_doc_equals( { "found": True, "_index": "test-document", "_id": "42", "_source": {"description": "testing"}, }, write_client.get(index="test-document", id=42), ) @pytest.mark.sync def test_save_without_skip_empty_will_include_empty_fields( write_client: Elasticsearch, ) -> None: test_repo = Repository( field_1=[], field_2=None, field_3={}, owner={"name": None}, meta={"id": 42} ) assert test_repo.save(index="test-document", skip_empty=False) assert_doc_equals( { "found": True, "_index": "test-document", "_id": "42", "_source": { "field_1": [], "field_2": None, "field_3": {}, "owner": {"name": None}, }, }, write_client.get(index="test-document", id=42), ) @pytest.mark.sync def test_delete(write_client: Elasticsearch) -> None: write_client.create( index="test-document", id="elasticsearch-dsl-py", body={ "organization": "elasticsearch", "created_at": "2014-03-03", "owner": {"name": "elasticsearch"}, }, ) test_repo = Repository(meta={"id": "elasticsearch-dsl-py"}) test_repo.meta.index = "test-document" test_repo.delete() assert not write_client.exists( index="test-document", id="elasticsearch-dsl-py", ) @pytest.mark.sync def test_search(data_client: Elasticsearch) -> None: assert Repository.search().count() == 1 @pytest.mark.sync def test_search_returns_proper_doc_classes( data_client: Elasticsearch, ) -> None: result = Repository.search().execute() elasticsearch_repo = result.hits[0] assert isinstance(elasticsearch_repo, Repository) assert elasticsearch_repo.owner.name == "elasticsearch" @pytest.mark.sync def test_refresh_mapping(data_client: Elasticsearch) -> None: class Commit(Document): class Index: name = "git" Commit._index.load_mappings() assert "stats" in Commit._index._mapping assert "committer" in Commit._index._mapping assert "description" in Commit._index._mapping assert "committed_date" in Commit._index._mapping assert isinstance(Commit._index._mapping["committed_date"], Date) @pytest.mark.sync def test_highlight_in_meta(data_client: Elasticsearch) -> None: commit = ( Commit.search() .query("match", description="inverting") .highlight("description") .execute() )[0] assert isinstance(commit, Commit) assert "description" in commit.meta.highlight assert isinstance(commit.meta.highlight["description"], AttrList) assert len(commit.meta.highlight["description"]) > 0 @pytest.mark.sync def test_bulk(data_client: Elasticsearch) -> None: class Address(InnerDoc): street: str active: bool class Doc(Document): if TYPE_CHECKING: _id: int name: str age: int languages: List[str] = mapped_field(Keyword()) addresses: List[Address] class Index: name = "bulk-index" Doc._index.delete(ignore_unavailable=True) Doc.init() def gen1() -> Iterator[Union[Doc, Dict[str, Any]]]: yield Doc( name="Joe", age=33, languages=["en", "fr"], addresses=[ Address(street="123 Main St", active=True), Address(street="321 Park Dr.", active=False), ], ) yield Doc(name="Susan", age=20, languages=["en"]) yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} Doc.bulk(gen1(), refresh=True) docs = list(Doc.search().execute()) assert len(docs) == 3 assert docs[0].to_dict() == { "name": "Joe", "age": 33, "languages": [ "en", "fr", ], "addresses": [ { "active": True, "street": "123 Main St", }, { "active": False, "street": "321 Park Dr.", }, ], } assert docs[1].to_dict() == { "name": "Susan", "age": 20, "languages": ["en"], } assert docs[2].to_dict() == { "name": "Sarah", "age": 45, } assert docs[2].meta.id == "45" def gen2() -> Iterator[Union[Doc, Dict[str, 
Any]]]: yield {"_op_type": "create", "_id": "45", "_source": Doc(name="Sarah", age=45)} # a "create" action with an existing id should fail with raises(BulkIndexError): Doc.bulk(gen2(), refresh=True) def gen3() -> Iterator[Union[Doc, Dict[str, Any]]]: yield Doc(_id="45", name="Sarah", age=45, languages=["es"]) yield {"_op_type": "delete", "_id": docs[1].meta.id} Doc.bulk(gen3(), refresh=True) with raises(NotFoundError): Doc.get(docs[1].meta.id) doc = Doc.get("45") assert doc is not None assert (doc).to_dict() == { "name": "Sarah", "age": 45, "languages": ["es"], } @pytest.mark.sync def test_legacy_dense_vector( client: Elasticsearch, es_version: Tuple[int, ...] ) -> None: if es_version >= (8, 16): pytest.skip("this test is a legacy version for Elasticsearch 8.15 or older") class Doc(Document): float_vector: List[float] = mapped_field(DenseVector(dims=3)) class Index: name = "vectors" Doc._index.delete(ignore_unavailable=True) Doc.init() doc = Doc(float_vector=[1.0, 1.2, 2.3]) doc.save(refresh=True) docs = Doc.search().execute() assert len(docs) == 1 assert docs[0].float_vector == doc.float_vector @pytest.mark.sync def test_dense_vector(client: Elasticsearch, es_version: Tuple[int, ...]) -> None: if es_version < (8, 16): pytest.skip("this test requires Elasticsearch 8.16 or newer") class Doc(Document): float_vector: List[float] = mapped_field(DenseVector()) byte_vector: List[int] = mapped_field(DenseVector(element_type="byte")) bit_vector: str = mapped_field(DenseVector(element_type="bit")) class Index: name = "vectors" Doc._index.delete(ignore_unavailable=True) Doc.init() doc = Doc( float_vector=[1.0, 1.2, 2.3], byte_vector=[12, 23, 34, 45], bit_vector="12abf0" ) doc.save(refresh=True) docs = Doc.search().execute() assert len(docs) == 1 assert docs[0].float_vector == doc.float_vector assert docs[0].byte_vector == doc.byte_vector assert docs[0].bit_vector == doc.bit_vector @pytest.mark.sync def test_copy_to(client: Elasticsearch) -> None: class Person(Document): first_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) last_name: M[str] = mapped_field(Text(copy_to=["full_name", "all"])) birth_place: M[str] = mapped_field(Text(copy_to="all")) full_name: M[Optional[str]] = mapped_field(init=False) all: M[Optional[str]] = mapped_field(init=False) class Index: name = "people" Person._index.delete(ignore_unavailable=True) Person.init() person = Person(first_name="Jane", last_name="Doe", birth_place="Springfield") person.save() Person._index.refresh() match = ( Person.search() .query(Match(Person.full_name, MatchQuery(query="Jane"))) .execute() ) assert len(match) == 1 match = Person.search().query(Match(Person.all, MatchQuery(query="Doe"))).execute() assert len(match) == 1 match = ( Person.search() .query(Match(Person.full_name, MatchQuery(query="Springfield"))) .execute() ) assert len(match) == 0 match = ( Person.search() .query(Match(Person.all, MatchQuery(query="Springfield"))) .execute() ) assert len(match) == 1 python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_esql.py000066400000000000000000000163731506101734100324270ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch.dsl import Document, InnerDoc, M from elasticsearch.esql import ESQL, E, functions class Address(InnerDoc): address: M[str] city: M[str] class Employee(Document): emp_no: M[int] first_name: M[str] last_name: M[str] height: M[float] still_hired: M[bool] address: M[Address] class Index: name = "employees" def load_db(): data = [ [ 10000, "Joseph", "Wall", 2.2, True, Address(address="8875 Long Shoals Suite 441", city="Marcville, TX"), ], [ 10001, "Stephanie", "Ward", 1.749, True, Address(address="90162 Carter Harbor Suite 099", city="Davisborough, DE"), ], [ 10002, "David", "Keller", 1.872, True, Address(address="6697 Patrick Union Suite 797", city="Fuentesmouth, SD"), ], [ 10003, "Roger", "Hinton", 1.694, False, Address(address="809 Kelly Mountains", city="South Megan, DE"), ], [ 10004, "Joshua", "Garcia", 1.661, False, Address(address="718 Angela Forks", city="Port Erinland, MA"), ], [ 10005, "Matthew", "Richards", 1.633, False, Address(address="2869 Brown Mountains", city="New Debra, NH"), ], [ 10006, "Maria", "Luna", 1.893, True, Address(address="5861 Morgan Springs", city="Lake Daniel, WI"), ], [ 10007, "Angela", "Navarro", 1.604, False, Address(address="2848 Allen Station", city="Saint Joseph, OR"), ], [ 10008, "Maria", "Cannon", 2.079, False, Address(address="322 NW Johnston", city="Bakerburgh, MP"), ], [ 10009, "Joseph", "Sutton", 2.025, True, Address(address="77 Cardinal E", city="Lakestown, IL"), ], ] if Employee._index.exists(): Employee._index.delete() Employee.init() for e in data: employee = Employee( emp_no=e[0], first_name=e[1], last_name=e[2], height=e[3], still_hired=e[4], address=e[5], ) employee.save() Employee._index.refresh() @pytest.mark.sync def test_esql(client): load_db() # get the full names of the employees query = ( ESQL.from_(Employee) .eval(full_name=functions.concat(Employee.first_name, " ", Employee.last_name)) .keep("full_name") .sort("full_name") .limit(10) ) r = client.esql.query(query=str(query)) assert r.body["values"] == [ ["Angela Navarro"], ["David Keller"], ["Joseph Sutton"], ["Joseph Wall"], ["Joshua Garcia"], ["Maria Cannon"], ["Maria Luna"], ["Matthew Richards"], ["Roger Hinton"], ["Stephanie Ward"], ] # get the average height of all hired employees query = ESQL.from_(Employee).stats( avg_height=functions.round(functions.avg(Employee.height), 2).where( Employee.still_hired == True # noqa: E712 ) ) r = client.esql.query(query=str(query)) assert r.body["values"] == [[1.95]] # find employees by name using a parameter query = ( ESQL.from_(Employee) .where(Employee.first_name == E("?")) .keep(Employee.last_name) .sort(Employee.last_name.desc()) ) r = client.esql.query(query=str(query), params=["Maria"]) assert r.body["values"] == [["Luna"], ["Cannon"]] @pytest.mark.sync def test_esql_dsl(client): load_db() # get employees with first name "Maria" query = ( Employee.esql_from() .where(Employee.first_name == "Maria") .sort("last_name") .limit(10) ) marias = [] for emp in Employee.esql_execute(query): marias.append(emp) assert len(marias) == 2 assert marias[0].last_name == "Cannon" assert marias[0].address.address == "322 
NW Johnston" assert marias[0].address.city == "Bakerburgh, MP" assert marias[1].last_name == "Luna" assert marias[1].address.address == "5861 Morgan Springs" assert marias[1].address.city == "Lake Daniel, WI" # run a query with a missing field query = ( Employee.esql_from() .where(Employee.first_name == "Maria") .drop(Employee.address.city) .sort("last_name") .limit(10) ) with pytest.raises(ValueError): Employee.esql_execute(query).__next__() marias = [] for emp in Employee.esql_execute(query, ignore_missing_fields=True): marias.append(emp) assert marias[0].last_name == "Cannon" assert marias[0].address.address == "322 NW Johnston" assert marias[0].address.city is None assert marias[1].last_name == "Luna" assert marias[1].address.address == "5861 Morgan Springs" assert marias[1].address.city is None # run a query with additional calculated fields query = ( Employee.esql_from() .where(Employee.first_name == "Maria") .eval( full_name=functions.concat(Employee.first_name, " ", Employee.last_name), height_cm=functions.to_integer(Employee.height * 100), ) .sort("last_name") .limit(10) ) assert isinstance(Employee.esql_execute(query).__next__(), Employee) assert isinstance( Employee.esql_execute(query, return_additional=True).__next__(), tuple ) marias = [] for emp, extra in Employee.esql_execute(query, return_additional=True): marias.append([emp, extra]) assert marias[0][0].last_name == "Cannon" assert marias[0][0].address.address == "322 NW Johnston" assert marias[0][0].address.city == "Bakerburgh, MP" assert marias[0][1] == {"full_name": "Maria Cannon", "height_cm": 208} assert marias[1][0].last_name == "Luna" assert marias[1][0].address.address == "5861 Morgan Springs" assert marias[1][0].address.city == "Lake Daniel, WI" assert marias[1][1] == {"full_name": "Maria Luna", "height_cm": 189} python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_faceted_search.py000066400000000000000000000223611506101734100343750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from datetime import datetime from typing import Tuple, Type import pytest from elasticsearch import Elasticsearch from elasticsearch.dsl import A, Boolean, Date, Document, Keyword, Search from elasticsearch.dsl.faceted_search import ( DateHistogramFacet, FacetedSearch, NestedFacet, RangeFacet, TermsFacet, ) from .test_document import PullRequest class Repos(Document): is_public = Boolean() created_at = Date() class Index: name = "git" class Commit(Document): files = Keyword() committed_date = Date() class Index: name = "git" class MetricSearch(FacetedSearch): index = "git" doc_types = [Commit] facets = { "files": TermsFacet(field="files", metric=A("max", field="committed_date")), } @pytest.fixture def commit_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]: if es_version >= (7, 2): interval_kwargs = {"fixed_interval": "1d"} else: interval_kwargs = {"interval": "day"} class CommitSearch(FacetedSearch): index = "flat-git" fields = ( "description", "files", ) facets = { "files": TermsFacet(field="files"), "frequency": DateHistogramFacet( field="authored_date", min_doc_count=1, **interval_kwargs ), "deletions": RangeFacet( field="stats.deletions", ranges=[("ok", (None, 1)), ("good", (1, 5)), ("better", (5, None))], ), } return CommitSearch @pytest.fixture def repo_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]: interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" class RepoSearch(FacetedSearch): index = "git" doc_types = [Repos] facets = { "public": TermsFacet(field="is_public"), "created": DateHistogramFacet( field="created_at", **{interval_type: "month"} ), } def search(self) -> Search: s = super().search() return s.filter("term", commit_repo="repo") return RepoSearch @pytest.fixture def pr_search_cls(es_version: Tuple[int, ...]) -> Type[FacetedSearch]: interval_type = "calendar_interval" if es_version >= (7, 2) else "interval" class PRSearch(FacetedSearch): index = "test-prs" doc_types = [PullRequest] facets = { "comments": NestedFacet( "comments", DateHistogramFacet( field="comments.created_at", **{interval_type: "month"} ), ) } return PRSearch @pytest.mark.sync def test_facet_with_custom_metric(data_client: Elasticsearch) -> None: ms = MetricSearch() r = ms.execute() dates = [f[1] for f in r.facets.files] assert dates == list(sorted(dates, reverse=True)) assert dates[0] == 1399038439000 @pytest.mark.sync def test_nested_facet( pull_request: PullRequest, pr_search_cls: Type[FacetedSearch] ) -> None: prs = pr_search_cls() r = prs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(datetime(2018, 1, 1, 0, 0), 1, False)] == r.facets.comments @pytest.mark.sync def test_nested_facet_with_filter( pull_request: PullRequest, pr_search_cls: Type[FacetedSearch] ) -> None: prs = pr_search_cls(filters={"comments": datetime(2018, 1, 1, 0, 0)}) r = prs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(datetime(2018, 1, 1, 0, 0), 1, True)] == r.facets.comments prs = pr_search_cls(filters={"comments": datetime(2018, 2, 1, 0, 0)}) r = prs.execute() assert not r.hits @pytest.mark.sync def test_datehistogram_facet( data_client: Elasticsearch, repo_search_cls: Type[FacetedSearch] ) -> None: rs = repo_search_cls() r = rs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(datetime(2014, 3, 1, 0, 0), 1, False)] == r.facets.created @pytest.mark.sync def test_boolean_facet( data_client: Elasticsearch, repo_search_cls: Type[FacetedSearch] ) -> None: rs = repo_search_cls() r = 
rs.execute() assert r.hits.total.value == 1 # type: ignore[attr-defined] assert [(True, 1, False)] == r.facets.public value, count, selected = r.facets.public[0] assert value is True @pytest.mark.sync def test_empty_search_finds_everything( data_client: Elasticsearch, es_version: Tuple[int, ...], commit_search_cls: Type[FacetedSearch], ) -> None: cs = commit_search_cls() r = cs.execute() assert r.hits.total.value == 52 # type: ignore[attr-defined] assert [ ("elasticsearch_dsl", 40, False), ("test_elasticsearch_dsl", 35, False), ("elasticsearch_dsl/query.py", 19, False), ("test_elasticsearch_dsl/test_search.py", 15, False), ("elasticsearch_dsl/utils.py", 14, False), ("test_elasticsearch_dsl/test_query.py", 13, False), ("elasticsearch_dsl/search.py", 12, False), ("elasticsearch_dsl/aggs.py", 11, False), ("test_elasticsearch_dsl/test_result.py", 5, False), ("elasticsearch_dsl/result.py", 3, False), ] == r.facets.files assert [ (datetime(2014, 3, 3, 0, 0), 2, False), (datetime(2014, 3, 4, 0, 0), 1, False), (datetime(2014, 3, 5, 0, 0), 3, False), (datetime(2014, 3, 6, 0, 0), 3, False), (datetime(2014, 3, 7, 0, 0), 9, False), (datetime(2014, 3, 10, 0, 0), 2, False), (datetime(2014, 3, 15, 0, 0), 4, False), (datetime(2014, 3, 21, 0, 0), 2, False), (datetime(2014, 3, 23, 0, 0), 2, False), (datetime(2014, 3, 24, 0, 0), 10, False), (datetime(2014, 4, 20, 0, 0), 2, False), (datetime(2014, 4, 22, 0, 0), 2, False), (datetime(2014, 4, 25, 0, 0), 3, False), (datetime(2014, 4, 26, 0, 0), 2, False), (datetime(2014, 4, 27, 0, 0), 2, False), (datetime(2014, 5, 1, 0, 0), 2, False), (datetime(2014, 5, 2, 0, 0), 1, False), ] == r.facets.frequency assert [ ("ok", 19, False), ("good", 14, False), ("better", 19, False), ] == r.facets.deletions @pytest.mark.sync def test_term_filters_are_shown_as_selected_and_data_is_filtered( data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch] ) -> None: cs = commit_search_cls(filters={"files": "test_elasticsearch_dsl"}) r = cs.execute() assert 35 == r.hits.total.value # type: ignore[attr-defined] assert [ ("elasticsearch_dsl", 40, False), ("test_elasticsearch_dsl", 35, True), # selected ("elasticsearch_dsl/query.py", 19, False), ("test_elasticsearch_dsl/test_search.py", 15, False), ("elasticsearch_dsl/utils.py", 14, False), ("test_elasticsearch_dsl/test_query.py", 13, False), ("elasticsearch_dsl/search.py", 12, False), ("elasticsearch_dsl/aggs.py", 11, False), ("test_elasticsearch_dsl/test_result.py", 5, False), ("elasticsearch_dsl/result.py", 3, False), ] == r.facets.files assert [ (datetime(2014, 3, 3, 0, 0), 1, False), (datetime(2014, 3, 5, 0, 0), 2, False), (datetime(2014, 3, 6, 0, 0), 3, False), (datetime(2014, 3, 7, 0, 0), 6, False), (datetime(2014, 3, 10, 0, 0), 1, False), (datetime(2014, 3, 15, 0, 0), 3, False), (datetime(2014, 3, 21, 0, 0), 2, False), (datetime(2014, 3, 23, 0, 0), 1, False), (datetime(2014, 3, 24, 0, 0), 7, False), (datetime(2014, 4, 20, 0, 0), 1, False), (datetime(2014, 4, 25, 0, 0), 3, False), (datetime(2014, 4, 26, 0, 0), 2, False), (datetime(2014, 4, 27, 0, 0), 1, False), (datetime(2014, 5, 1, 0, 0), 1, False), (datetime(2014, 5, 2, 0, 0), 1, False), ] == r.facets.frequency assert [ ("ok", 12, False), ("good", 10, False), ("better", 13, False), ] == r.facets.deletions @pytest.mark.sync def test_range_filters_are_shown_as_selected_and_data_is_filtered( data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch] ) -> None: cs = commit_search_cls(filters={"deletions": "better"}) r = cs.execute() assert 19 == r.hits.total.value # 
type: ignore[attr-defined] @pytest.mark.sync def test_pagination( data_client: Elasticsearch, commit_search_cls: Type[FacetedSearch] ) -> None: cs = commit_search_cls() cs = cs[0:20] assert 52 == cs.count() assert 20 == len(cs.execute()) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_index.py000066400000000000000000000113121506101734100325560ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import Elasticsearch from elasticsearch.dsl import ( ComposableIndexTemplate, Date, Document, Index, IndexTemplate, Text, analysis, ) class Post(Document): title = Text(analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword")) published_from = Date() @pytest.mark.sync def test_index_template_works(write_client: Elasticsearch) -> None: it = IndexTemplate("test-template", "test-legacy-*") it.document(Post) it.settings(number_of_replicas=0, number_of_shards=1) it.save() i = Index("test-legacy-blog") i.create() assert { "test-legacy-blog": { "mappings": { "properties": { "title": {"type": "text", "analyzer": "my_analyzer"}, "published_from": {"type": "date"}, } } } } == write_client.indices.get_mapping(index="test-legacy-blog") @pytest.mark.sync def test_composable_index_template_works( write_client: Elasticsearch, ) -> None: it = ComposableIndexTemplate("test-template", "test-*") it.document(Post) it.settings(number_of_replicas=0, number_of_shards=1) it.save() i = Index("test-blog") i.create() assert { "test-blog": { "mappings": { "properties": { "title": {"type": "text", "analyzer": "my_analyzer"}, "published_from": {"type": "date"}, } } } } == write_client.indices.get_mapping(index="test-blog") @pytest.mark.sync def test_index_can_be_saved_even_with_settings( write_client: Elasticsearch, ) -> None: i = Index("test-blog", using=write_client) i.settings(number_of_shards=3, number_of_replicas=0) i.save() i.settings(number_of_replicas=1) i.save() assert ( "1" == (i.get_settings())["test-blog"]["settings"]["index"]["number_of_replicas"] ) @pytest.mark.sync def test_index_exists(data_client: Elasticsearch) -> None: assert Index("git").exists() assert not Index("not-there").exists() @pytest.mark.sync def test_index_can_be_created_with_settings_and_mappings( write_client: Elasticsearch, ) -> None: i = Index("test-blog", using=write_client) i.document(Post) i.settings(number_of_replicas=0, number_of_shards=1) i.create() assert { "test-blog": { "mappings": { "properties": { "title": {"type": "text", "analyzer": "my_analyzer"}, "published_from": {"type": "date"}, } } } } == write_client.indices.get_mapping(index="test-blog") settings = write_client.indices.get_settings(index="test-blog") assert settings["test-blog"]["settings"]["index"]["number_of_replicas"] == "0" assert 
settings["test-blog"]["settings"]["index"]["number_of_shards"] == "1" assert settings["test-blog"]["settings"]["index"]["analysis"] == { "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} } @pytest.mark.sync def test_delete(write_client: Elasticsearch) -> None: write_client.indices.create( index="test-index", body={"settings": {"number_of_replicas": 0, "number_of_shards": 1}}, ) i = Index("test-index", using=write_client) i.delete() assert not write_client.indices.exists(index="test-index") @pytest.mark.sync def test_multiple_indices_with_same_doc_type_work( write_client: Elasticsearch, ) -> None: i1 = Index("test-index-1", using=write_client) i2 = Index("test-index-2", using=write_client) for i in (i1, i2): i.document(Post) i.create() for j in ("test-index-1", "test-index-2"): settings = write_client.indices.get_settings(index=j) assert settings[j]["settings"]["index"]["analysis"] == { "analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}} } python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_mapping.py000066400000000000000000000127521506101734100331130ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import pytest from pytest import raises from elasticsearch import Elasticsearch from elasticsearch.dsl import Mapping, analysis, exceptions @pytest.mark.sync def test_mapping_saved_into_es(write_client: Elasticsearch) -> None: m = Mapping() m.field( "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") ) m.field("tags", "keyword") m.save("test-mapping", using=write_client) assert { "test-mapping": { "mappings": { "properties": { "name": {"type": "text", "analyzer": "my_analyzer"}, "tags": {"type": "keyword"}, } } } } == write_client.indices.get_mapping(index="test-mapping") @pytest.mark.sync def test_mapping_saved_into_es_when_index_already_exists_closed( write_client: Elasticsearch, ) -> None: m = Mapping() m.field( "name", "text", analyzer=analysis.analyzer("my_analyzer", tokenizer="keyword") ) write_client.indices.create(index="test-mapping") with raises(exceptions.IllegalOperation): m.save("test-mapping", using=write_client) write_client.cluster.health(index="test-mapping", wait_for_status="yellow") write_client.indices.close(index="test-mapping") m.save("test-mapping", using=write_client) assert { "test-mapping": { "mappings": { "properties": {"name": {"type": "text", "analyzer": "my_analyzer"}} } } } == write_client.indices.get_mapping(index="test-mapping") @pytest.mark.sync def test_mapping_saved_into_es_when_index_already_exists_with_analysis( write_client: Elasticsearch, ) -> None: m = Mapping() analyzer = analysis.analyzer("my_analyzer", tokenizer="keyword") m.field("name", "text", analyzer=analyzer) new_analysis = analyzer.get_analysis_definition() new_analysis["analyzer"]["other_analyzer"] = { "type": "custom", "tokenizer": "whitespace", } write_client.indices.create( index="test-mapping", body={"settings": {"analysis": new_analysis}} ) m.field("title", "text", analyzer=analyzer) m.save("test-mapping", using=write_client) assert { "test-mapping": { "mappings": { "properties": { "name": {"type": "text", "analyzer": "my_analyzer"}, "title": {"type": "text", "analyzer": "my_analyzer"}, } } } } == write_client.indices.get_mapping(index="test-mapping") @pytest.mark.sync def test_mapping_gets_updated_from_es( write_client: Elasticsearch, ) -> None: write_client.indices.create( index="test-mapping", body={ "settings": {"number_of_shards": 1, "number_of_replicas": 0}, "mappings": { "date_detection": False, "properties": { "title": { "type": "text", "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, }, "created_at": {"type": "date"}, "comments": { "type": "nested", "properties": { "created": {"type": "date"}, "author": { "type": "text", "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, }, }, }, }, }, }, ) m = Mapping.from_es("test-mapping", using=write_client) assert ["comments", "created_at", "title"] == list( sorted(m.properties.properties._d_.keys()) # type: ignore[attr-defined] ) assert { "date_detection": False, "properties": { "comments": { "type": "nested", "properties": { "created": {"type": "date"}, "author": { "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, "type": "text", }, }, }, "created_at": {"type": "date"}, "title": { "analyzer": "snowball", "fields": {"raw": {"type": "keyword"}}, "type": "text", }, }, } == m.to_dict() # test same with alias write_client.indices.put_alias(index="test-mapping", name="test-alias") m2 = Mapping.from_es("test-alias", using=write_client) assert m2.to_dict() == m.to_dict() 
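# For reference, the analyzer objects used above serialize into the
# "analysis" block of the index settings; a short illustration of the shape
# asserted on throughout these integration tests:
#
#     a = analysis.analyzer("my_analyzer", tokenizer="keyword")
#     a.get_analysis_definition()
#     # -> {"analyzer": {"my_analyzer": {"type": "custom", "tokenizer": "keyword"}}}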
python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync/test_search.py000066400000000000000000000204731506101734100327240ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from pytest import raises from elasticsearch import ApiError, Elasticsearch from elasticsearch.dsl import ( Date, Document, Keyword, MultiSearch, Q, Search, Text, ) from elasticsearch.dsl.response import aggs from ..test_data import FLAT_DATA class Repository(Document): created_at = Date() description = Text(analyzer="snowball") tags = Keyword() @classmethod def search(cls) -> Search["Repository"]: # type: ignore[override] return super().search().filter("term", commit_repo="repo") class Index: name = "git" class Commit(Document): class Index: name = "flat-git" @pytest.mark.sync def test_filters_aggregation_buckets_are_accessible( data_client: Elasticsearch, ) -> None: has_tests_query = Q("term", files="test_elasticsearch_dsl") s = Commit.search()[0:0] s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket( "has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query} ).metric("lines", "stats", field="stats.lines") response = s.execute() assert isinstance( response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket ) assert ( 35 == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count ) assert ( 228 == response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max ) @pytest.mark.sync def test_top_hits_are_wrapped_in_response( data_client: Elasticsearch, ) -> None: s = Commit.search()[0:0] s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric( "top_commits", "top_hits", size=5 ) response = s.execute() top_commits = response.aggregations.top_authors.buckets[0].top_commits assert isinstance(top_commits, aggs.TopHitsData) assert 5 == len(top_commits) hits = [h for h in top_commits] assert 5 == len(hits) assert isinstance(hits[0], Commit) @pytest.mark.sync def test_inner_hits_are_wrapped_in_response( data_client: Elasticsearch, ) -> None: s = Search(index="git")[0:1].query( "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all") ) response = s.execute() commit = response.hits[0] assert isinstance(commit.meta.inner_hits.repo, response.__class__) assert repr(commit.meta.inner_hits.repo[0]).startswith( "<Hit(git/elasticsearch-dsl-py): " ) @pytest.mark.sync def test_inner_hits_are_serialized_to_dict( data_client: Elasticsearch, ) -> None: s = Search(index="git")[0:1].query( "has_parent", parent_type="repo", inner_hits={}, query=Q("match_all") ) response = s.execute() d = response.to_dict(recursive=True) assert isinstance(d, dict) assert isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict) # iterating over the results changes the format of the internal AttrDict for hit in response: pass d = response.to_dict(recursive=True) assert isinstance(d, dict) assert
isinstance(d["hits"]["hits"][0]["inner_hits"]["repo"], dict) @pytest.mark.sync def test_scan_respects_doc_types(data_client: Elasticsearch) -> None: repos = [repo for repo in Repository.search().scan()] assert 1 == len(repos) assert isinstance(repos[0], Repository) assert repos[0].organization == "elasticsearch" @pytest.mark.sync def test_scan_iterates_through_all_docs( data_client: Elasticsearch, ) -> None: s = Search(index="flat-git") commits = [commit for commit in s.scan()] assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.sync def test_search_after(data_client: Elasticsearch) -> None: page_size = 7 s = Search(index="flat-git")[:page_size].sort("authored_date") commits = [] while True: r = s.execute() commits += r.hits if len(r.hits) < page_size: break s = s.search_after() assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.sync def test_search_after_no_search(data_client: Elasticsearch) -> None: s = Search(index="flat-git") with raises( ValueError, match="A search must be executed before using search_after" ): s.search_after() s.count() with raises( ValueError, match="A search must be executed before using search_after" ): s.search_after() @pytest.mark.sync def test_search_after_no_sort(data_client: Elasticsearch) -> None: s = Search(index="flat-git") r = s.execute() with raises( ValueError, match="Cannot use search_after when results are not sorted" ): r.search_after() @pytest.mark.sync def test_search_after_no_results(data_client: Elasticsearch) -> None: s = Search(index="flat-git")[:100].sort("authored_date") r = s.execute() assert 52 == len(r.hits) s = s.search_after() r = s.execute() assert 0 == len(r.hits) with raises( ValueError, match="Cannot use search_after when there are no search results" ): r.search_after() @pytest.mark.sync def test_point_in_time(data_client: Elasticsearch) -> None: page_size = 7 commits = [] with Search(index="flat-git")[:page_size].point_in_time(keep_alive="30s") as s: pit_id = s._extra["pit"]["id"] while True: r = s.execute() commits += r.hits if len(r.hits) < page_size: break s = s.search_after() assert pit_id == s._extra["pit"]["id"] assert "30s" == s._extra["pit"]["keep_alive"] assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.sync def test_iterate(data_client: Elasticsearch) -> None: s = Search(index="flat-git") commits = [commit for commit in s.iterate()] assert 52 == len(commits) assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits} @pytest.mark.sync def test_response_is_cached(data_client: Elasticsearch) -> None: s = Repository.search() repos = [repo for repo in s] assert hasattr(s, "_response") assert s._response.hits == repos @pytest.mark.sync def test_multi_search(data_client: Elasticsearch) -> None: s1 = Repository.search() s2 = Search[Repository](index="flat-git") ms = MultiSearch[Repository]() ms = ms.add(s1).add(s2) r1, r2 = ms.execute() assert 1 == len(r1) assert isinstance(r1[0], Repository) assert r1._search is s1 assert 52 == r2.hits.total.value # type: ignore[attr-defined] assert r2._search is s2 @pytest.mark.sync def test_multi_missing(data_client: Elasticsearch) -> None: s1 = Repository.search() s2 = Search[Repository](index="flat-git") s3 = Search[Repository](index="does_not_exist") ms = MultiSearch[Repository]() ms = ms.add(s1).add(s2).add(s3) with raises(ApiError): ms.execute() r1, r2, r3 = ms.execute(raise_on_error=False) assert 1 == len(r1) 
assert isinstance(r1[0], Repository) assert r1._search is s1 assert 52 == r2.hits.total.value # type: ignore[attr-defined] assert r2._search is s2 assert r3 is None @pytest.mark.sync def test_raw_subfield_can_be_used_in_aggs( data_client: Elasticsearch, ) -> None: s = Search(index="git")[0:0] s.aggs.bucket("authors", "terms", field="author.name.raw", size=1) r = s.execute() authors = r.aggregations.authors assert 1 == len(authors) assert {"key": "Honza Král", "doc_count": 52} == authors[0] test_update_by_query.py000066400000000000000000000047161506101734100346030ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import Elasticsearch from elasticsearch.dsl import UpdateByQuery from elasticsearch.dsl.search import Q @pytest.mark.sync def test_update_by_query_no_script( write_client: Elasticsearch, setup_ubq_tests: str ) -> None: index = setup_ubq_tests ubq = ( UpdateByQuery(using=write_client) .index(index) .filter(~Q("exists", field="is_public")) ) response = ubq.execute() assert response.total == 52 assert response["took"] > 0 assert not response.timed_out assert response.updated == 52 assert response.deleted == 0 assert response.took > 0 assert response.success() @pytest.mark.sync def test_update_by_query_with_script( write_client: Elasticsearch, setup_ubq_tests: str ) -> None: index = setup_ubq_tests ubq = ( UpdateByQuery(using=write_client) .index(index) .filter(~Q("exists", field="parent_shas")) .script(source="ctx._source.is_public = false") ) ubq = ubq.params(conflicts="proceed") response = ubq.execute() assert response.total == 2 assert response.updated == 2 assert response.version_conflicts == 0 @pytest.mark.sync def test_delete_by_query_with_script( write_client: Elasticsearch, setup_ubq_tests: str ) -> None: index = setup_ubq_tests ubq = ( UpdateByQuery(using=write_client) .index(index) .filter(Q("match", parent_shas="1dd19210b5be92b960f7db6f66ae526288edccc3")) .script(source='ctx.op = "delete"') ) ubq = ubq.params(conflicts="proceed") response = ubq.execute() assert response.total == 1 assert response.deleted == 1 assert response.success() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_count.py000066400000000000000000000032751506101734100314750ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any from elasticsearch import Elasticsearch from elasticsearch.dsl.search import Q, Search def test_count_all(data_client: Elasticsearch) -> None: s = Search(using=data_client).index("git") assert 53 == s.count() def test_count_prefetch(data_client: Elasticsearch, mocker: Any) -> None: mocker.spy(data_client, "count") search = Search(using=data_client).index("git") search.execute() assert search.count() == 53 assert data_client.count.call_count == 0 # type: ignore[attr-defined] search._response.hits.total.relation = "gte" # type: ignore[attr-defined] assert search.count() == 53 assert data_client.count.call_count == 1 # type: ignore[attr-defined] def test_count_filter(data_client: Elasticsearch) -> None: s = Search(using=data_client).index("git").filter(~Q("exists", field="parent_shas")) # initial commit + repo document assert 2 == s.count() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_data.py000066400000000000000000001413541506101734100312570ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
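# Shared fixtures for the integration tests: two index definitions plus the
# commit history of this repository serialized as documents. "flat-git"
# stores commits as plain documents, while "git" additionally models the
# repository/commit relationship through a join field ("commit_repo", with
# relations {"repo": "commit"}), which is why every commit document below
# carries a "routing" key pointing at its parent repository.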
from typing import Any, Dict from elasticsearch import Elasticsearch user_mapping = { "properties": {"name": {"type": "text", "fields": {"raw": {"type": "keyword"}}}} } FLAT_GIT_INDEX: Dict[str, Any] = { "settings": { # custom analyzer for analyzing file paths "analysis": { "analyzer": { "file_path": { "type": "custom", "tokenizer": "path_hierarchy", "filter": ["lowercase"], } } }, }, "mappings": { "properties": { "description": {"type": "text", "analyzer": "snowball"}, "author": user_mapping, "authored_date": {"type": "date"}, "committer": user_mapping, "committed_date": {"type": "date"}, "parent_shas": {"type": "keyword"}, "files": { "type": "text", "analyzer": "file_path", "fielddata": True, }, } }, } GIT_INDEX: Dict[str, Any] = { "settings": { # custom analyzer for analyzing file paths "analysis": { "analyzer": { "file_path": { "type": "custom", "tokenizer": "path_hierarchy", "filter": ["lowercase"], } } }, }, "mappings": { "properties": { # common fields "description": {"type": "text", "analyzer": "snowball"}, "commit_repo": {"type": "join", "relations": {"repo": "commit"}}, # COMMIT mappings "author": user_mapping, "authored_date": {"type": "date"}, "committer": user_mapping, "committed_date": {"type": "date"}, "parent_shas": {"type": "keyword"}, "files": { "type": "text", "analyzer": "file_path", "fielddata": True, }, # REPO mappings "is_public": {"type": "boolean"}, "owner": user_mapping, "created_at": {"type": "date"}, "tags": {"type": "keyword"}, } }, } def create_flat_git_index(client: Elasticsearch, index: str) -> None: client.indices.create(index=index, body=FLAT_GIT_INDEX) def create_git_index(client: Elasticsearch, index: str) -> None: client.indices.create(index=index, body=GIT_INDEX) DATA = [ # repository { "_id": "elasticsearch-dsl-py", "_source": { "commit_repo": "repo", "organization": "elasticsearch", "created_at": "2014-03-03", "owner": {"name": "elasticsearch"}, "is_public": True, }, "_index": "git", }, # documents { "_id": "3ca6e1e73a071a705b4babd2f581c91a2a3e5037", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_aggs.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 7, "insertions": 23, "lines": 30, "files": 4}, "description": "Make sure buckets aren't modified in-place", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["eb3e543323f189fd7b698e66295427204fff5755"], "committed_date": "2014-05-02T13:47:19", "authored_date": "2014-05-02T13:47:19.123+02:00", }, "_index": "git", }, { "_id": "eb3e543323f189fd7b698e66295427204fff5755", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/search.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 18, "lines": 18, "files": 1}, "description": "Add communication with ES server", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["dd15b6ba17dd9ba16363a51f85b31f66f1fb1157"], "committed_date": "2014-05-01T13:32:14", "authored_date": "2014-05-01T13:32:14", }, "_index": "git", }, { "_id": "dd15b6ba17dd9ba16363a51f85b31f66f1fb1157", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": 
"elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/utils.py", "test_elasticsearch_dsl/test_result.py", "elasticsearch_dsl/result.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 18, "insertions": 44, "lines": 62, "files": 3}, "description": "Minor cleanup and adding helpers for interactive python", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["ed19caf25abd25300e707fadf3f81b05c5673446"], "committed_date": "2014-05-01T13:30:44", "authored_date": "2014-05-01T13:30:44", }, "_index": "git", }, { "_id": "ed19caf25abd25300e707fadf3f81b05c5673446", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 28, "lines": 28, "files": 3}, "description": "Make sure aggs do copy-on-write", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["583e52c71e9a72c1b291ec5843683d8fa8f1ce2d"], "committed_date": "2014-04-27T16:28:09", "authored_date": "2014-04-27T16:28:09", }, "_index": "git", }, { "_id": "583e52c71e9a72c1b291ec5843683d8fa8f1ce2d", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/aggs.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 1, "lines": 2, "files": 1}, "description": "Use __setitem__ from DslBase in AggsBase", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["1dd19210b5be92b960f7db6f66ae526288edccc3"], "committed_date": "2014-04-27T15:51:53", "authored_date": "2014-04-27T15:51:53", }, "_index": "git", }, { "_id": "1dd19210b5be92b960f7db6f66ae526288edccc3", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_search.py", "elasticsearch_dsl/search.py", "elasticsearch_dsl/filter.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 21, "insertions": 98, "lines": 119, "files": 5}, "description": "Have Search clone itself on any change besides aggs", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["b4c9e29376af2e42a4e6dc153f0f293b1a18bac3"], "committed_date": "2014-04-26T14:49:43", "authored_date": "2014-04-26T14:49:43", }, "_index": "git", }, { "_id": "b4c9e29376af2e42a4e6dc153f0f293b1a18bac3", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["test_elasticsearch_dsl/test_result.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 5, "lines": 5, "files": 1}, "description": "Add tests for [] on response", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["a64a54181b232bb5943bd16960be9416e402f5f5"], "committed_date": "2014-04-26T13:56:52", "authored_date": "2014-04-26T13:56:52", }, "_index": "git", }, { "_id": "a64a54181b232bb5943bd16960be9416e402f5f5", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": 
"elasticsearch-dsl-py"}, "files": ["test_elasticsearch_dsl/test_result.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 7, "lines": 8, "files": 1}, "description": "Test access to missing fields raises appropriate exceptions", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["df3f778a3d37b170bde6979a4ef2d9e3e6400778"], "committed_date": "2014-04-25T16:01:07", "authored_date": "2014-04-25T16:01:07", }, "_index": "git", }, { "_id": "df3f778a3d37b170bde6979a4ef2d9e3e6400778", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/utils.py", "test_elasticsearch_dsl/test_result.py", "elasticsearch_dsl/result.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 8, "insertions": 31, "lines": 39, "files": 3}, "description": "Support attribute access even for inner/nested objects", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["7e599e116b5ff5d271ce3fe1ebc80e82ab3d5925"], "committed_date": "2014-04-25T15:59:02", "authored_date": "2014-04-25T15:59:02", }, "_index": "git", }, { "_id": "7e599e116b5ff5d271ce3fe1ebc80e82ab3d5925", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "test_elasticsearch_dsl/test_result.py", "elasticsearch_dsl/result.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 149, "lines": 149, "files": 2}, "description": "Added a prototype of a Respose and Result classes", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["e2882d28cb8077eaa3e5d8ae76543482d4d90f7e"], "committed_date": "2014-04-25T15:12:15", "authored_date": "2014-04-25T15:12:15", }, "_index": "git", }, { "_id": "e2882d28cb8077eaa3e5d8ae76543482d4d90f7e", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["docs/index.rst"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 6, "lines": 6, "files": 1}, "description": "add warning to the docs", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["51f94d83d1c47d3b81207736ca97a1ec6302678f"], "committed_date": "2014-04-22T19:16:21", "authored_date": "2014-04-22T19:16:21", }, "_index": "git", }, { "_id": "51f94d83d1c47d3b81207736ca97a1ec6302678f", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/utils.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 3, "insertions": 29, "lines": 32, "files": 1}, "description": "Add some comments to the code", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["0950f6c600b49e2bf012d03b02250fb71c848555"], "committed_date": "2014-04-22T19:12:06", "authored_date": "2014-04-22T19:12:06", }, "_index": "git", }, { "_id": "0950f6c600b49e2bf012d03b02250fb71c848555", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["README.rst"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 6, "lines": 6, 
"files": 1}, "description": "Added a WIP warning", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["54d058f5ac6be8225ef61d5529772aada42ec6c8"], "committed_date": "2014-04-20T00:19:25", "authored_date": "2014-04-20T00:19:25", }, "_index": "git", }, { "_id": "54d058f5ac6be8225ef61d5529772aada42ec6c8", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/__init__.py", "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 36, "insertions": 7, "lines": 43, "files": 3}, "description": "Remove the operator kwarg from .query", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["4cb07845e45787abc1f850c0b561e487e0034424"], "committed_date": "2014-04-20T00:17:25", "authored_date": "2014-04-20T00:17:25", }, "_index": "git", }, { "_id": "4cb07845e45787abc1f850c0b561e487e0034424", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 35, "insertions": 49, "lines": 84, "files": 2}, "description": "Complex example", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["578abe80f76aafd7e81fe46a44403e601733a938"], "committed_date": "2014-03-24T20:48:45", "authored_date": "2014-03-24T20:48:45", }, "_index": "git", }, { "_id": "578abe80f76aafd7e81fe46a44403e601733a938", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["test_elasticsearch_dsl/test_search.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 2, "insertions": 0, "lines": 2, "files": 1}, "description": "removing extra whitespace", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["ecb84f03565940c7d294dbc80723420dcfbab340"], "committed_date": "2014-03-24T20:42:23", "authored_date": "2014-03-24T20:42:23", }, "_index": "git", }, { "_id": "ecb84f03565940c7d294dbc80723420dcfbab340", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["test_elasticsearch_dsl/test_search.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 3, "lines": 4, "files": 1}, "description": "Make sure attribute access works for .query on Search", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["9a247c876ab66e2bca56b25f392d054e613b1b2a"], "committed_date": "2014-03-24T20:35:02", "authored_date": "2014-03-24T20:34:46", }, "_index": "git", }, { "_id": "9a247c876ab66e2bca56b25f392d054e613b1b2a", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/search.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 2, "lines": 2, "files": 1}, "description": "Make sure .index and .doc_type methods are chainable", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["cee5e46947d510a49edd3609ff91aab7b1f3ac89"], 
"committed_date": "2014-03-24T20:27:46", "authored_date": "2014-03-24T20:27:46", }, "_index": "git", }, { "_id": "cee5e46947d510a49edd3609ff91aab7b1f3ac89", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", "elasticsearch_dsl/filter.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 13, "insertions": 128, "lines": 141, "files": 3}, "description": "Added .filter and .post_filter to Search", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["1d6857182b09a556d58c6bc5bdcb243092812ba3"], "committed_date": "2014-03-24T20:26:57", "authored_date": "2014-03-24T20:26:57", }, "_index": "git", }, { "_id": "1d6857182b09a556d58c6bc5bdcb243092812ba3", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 24, "insertions": 29, "lines": 53, "files": 2}, "description": "Extracted combination logic into DslBase", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["4ad92f15a1955846c01642318303a821e8435b75"], "committed_date": "2014-03-24T20:03:51", "authored_date": "2014-03-24T20:03:51", }, "_index": "git", }, { "_id": "4ad92f15a1955846c01642318303a821e8435b75", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 43, "insertions": 45, "lines": 88, "files": 2}, "description": "Extracted bool-related logic to a mixin to be reused by filters", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["6eb39dc2825605543ac1ed0b45b9b6baeecc44c2"], "committed_date": "2014-03-24T19:16:16", "authored_date": "2014-03-24T19:16:16", }, "_index": "git", }, { "_id": "6eb39dc2825605543ac1ed0b45b9b6baeecc44c2", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 32, "lines": 33, "files": 2}, "description": "Enable otheroperators when querying on Search object", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["be094c7b307332cb6039bf9a7c984d2c7593ddff"], "committed_date": "2014-03-24T18:25:10", "authored_date": "2014-03-24T18:25:10", }, "_index": "git", }, { "_id": "be094c7b307332cb6039bf9a7c984d2c7593ddff", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 23, "insertions": 35, "lines": 58, "files": 3}, "description": "make sure query operations always return copies", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["b2576e3b6437e2cb9d8971fee4ead60df91fd75b"], 
"committed_date": "2014-03-24T18:10:37", "authored_date": "2014-03-24T18:03:13", }, "_index": "git", }, { "_id": "b2576e3b6437e2cb9d8971fee4ead60df91fd75b", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 53, "lines": 54, "files": 2}, "description": "Adding or operator for queries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["1be002170ac3cd59d2e97824b83b88bb3c9c60ed"], "committed_date": "2014-03-24T17:53:38", "authored_date": "2014-03-24T17:53:38", }, "_index": "git", }, { "_id": "1be002170ac3cd59d2e97824b83b88bb3c9c60ed", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 35, "lines": 35, "files": 2}, "description": "Added inverting of queries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["24e1e38b2f704f65440d96c290b7c6cd54c2e00e"], "committed_date": "2014-03-23T17:44:36", "authored_date": "2014-03-23T17:44:36", }, "_index": "git", }, { "_id": "24e1e38b2f704f65440d96c290b7c6cd54c2e00e", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/aggs.py", "elasticsearch_dsl/utils.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 5, "insertions": 1, "lines": 6, "files": 2}, "description": "Change equality checks to use .to_dict()", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["277cfaedbaf3705ed74ad6296227e1172c97a63f"], "committed_date": "2014-03-23T17:43:01", "authored_date": "2014-03-23T17:43:01", }, "_index": "git", }, { "_id": "277cfaedbaf3705ed74ad6296227e1172c97a63f", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 11, "lines": 12, "files": 2}, "description": "Test combining of bool queries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["6aa3868a6a9f35f71553ce96f9d3d63c74d054fd"], "committed_date": "2014-03-21T15:15:06", "authored_date": "2014-03-21T15:15:06", }, "_index": "git", }, { "_id": "6aa3868a6a9f35f71553ce96f9d3d63c74d054fd", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 23, "lines": 24, "files": 2}, "description": "Adding & operator for queries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["bb311eb35e7eb53fb5ae01e3f80336866c7e3e37"], "committed_date": "2014-03-21T15:10:08", "authored_date": "2014-03-21T15:10:08", }, "_index": "git", }, { "_id": "bb311eb35e7eb53fb5ae01e3f80336866c7e3e37", 
"routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/utils.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 1, "insertions": 4, "lines": 5, "files": 2}, "description": "Don't serialize empty typed fields into dict", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["aea8ea9e421bd53a5b058495e68c3fd57bb1dacc"], "committed_date": "2014-03-15T16:29:37", "authored_date": "2014-03-15T16:29:37", }, "_index": "git", }, { "_id": "aea8ea9e421bd53a5b058495e68c3fd57bb1dacc", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 3, "insertions": 37, "lines": 40, "files": 3}, "description": "Bool queries, when combining just adds their params together", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["a8819a510b919be43ff3011b904f257798fb8916"], "committed_date": "2014-03-15T16:16:40", "authored_date": "2014-03-15T16:16:40", }, "_index": "git", }, { "_id": "a8819a510b919be43ff3011b904f257798fb8916", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["test_elasticsearch_dsl/run_tests.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 6, "insertions": 2, "lines": 8, "files": 1}, "description": "Simpler run_tests.py", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["e35792a725be2325fc54d3fcb95a7d38d8075a99"], "committed_date": "2014-03-15T16:02:21", "authored_date": "2014-03-15T16:02:21", }, "_index": "git", }, { "_id": "e35792a725be2325fc54d3fcb95a7d38d8075a99", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/aggs.py", "elasticsearch_dsl/query.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 2, "insertions": 2, "lines": 4, "files": 2}, "description": "Maku we don't treat shortcuts as methods.", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["3179d778dc9e3f2883d5f7ffa63b9ae0399c16bc"], "committed_date": "2014-03-15T15:59:21", "authored_date": "2014-03-15T15:59:21", }, "_index": "git", }, { "_id": "3179d778dc9e3f2883d5f7ffa63b9ae0399c16bc", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "elasticsearch_dsl/query.py", "elasticsearch_dsl/utils.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 9, "insertions": 5, "lines": 14, "files": 3}, "description": "Centralize == of Dsl objects", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["b5e7d0c4b284211df8f7b464fcece93a27a802fb"], "committed_date": "2014-03-10T21:37:24", "authored_date": "2014-03-10T21:37:24", }, "_index": "git", }, { "_id": "b5e7d0c4b284211df8f7b464fcece93a27a802fb", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": 
"elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", "elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_aggs.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 75, "insertions": 115, "lines": 190, "files": 6}, "description": "Experimental draft with more declarative DSL", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["0fe741b43adee5ca1424584ddd3f35fa33f8733c"], "committed_date": "2014-03-10T21:34:39", "authored_date": "2014-03-10T21:34:39", }, "_index": "git", }, { "_id": "0fe741b43adee5ca1424584ddd3f35fa33f8733c", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["test_elasticsearch_dsl/test_search.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 2, "insertions": 2, "lines": 4, "files": 1}, "description": "Make sure .query is chainable", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["a22be5933d4b022cbacee867b1aece120208edf3"], "committed_date": "2014-03-07T17:41:59", "authored_date": "2014-03-07T17:41:59", }, "_index": "git", }, { "_id": "a22be5933d4b022cbacee867b1aece120208edf3", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 14, "insertions": 44, "lines": 58, "files": 3}, "description": "Search now does aggregations", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["e823686aacfc4bdcb34ffdab337a26fa09659a9a"], "committed_date": "2014-03-07T17:29:55", "authored_date": "2014-03-07T17:29:55", }, "_index": "git", }, { "_id": "e823686aacfc4bdcb34ffdab337a26fa09659a9a", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [".gitignore"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 1, "lines": 1, "files": 1}, "description": "Ignore html coverage report", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["e0aedb3011c71d704deec03a8f32b2b360d6e364"], "committed_date": "2014-03-07T17:03:23", "authored_date": "2014-03-07T17:03:23", }, "_index": "git", }, { "_id": "e0aedb3011c71d704deec03a8f32b2b360d6e364", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/aggs.py", "test_elasticsearch_dsl/test_aggs.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 228, "lines": 228, "files": 2}, "description": "Added aggregation DSL objects", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["61cbc0aa62a0b776ae5e333406659dbb2f5cfbbd"], "committed_date": "2014-03-07T16:25:55", "authored_date": "2014-03-07T16:25:55", }, "_index": "git", }, { "_id": "61cbc0aa62a0b776ae5e333406659dbb2f5cfbbd", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/utils.py", 
"elasticsearch_dsl/query.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 12, "insertions": 7, "lines": 19, "files": 2}, "description": "Only retrieve DslClass, leave the instantiation to the caller", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["647f1017a7b17a913e07af70a3b03202f6adbdfd"], "committed_date": "2014-03-07T15:27:43", "authored_date": "2014-03-07T15:27:43", }, "_index": "git", }, { "_id": "647f1017a7b17a913e07af70a3b03202f6adbdfd", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "test_elasticsearch_dsl/test_search.py", "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 19, "insertions": 19, "lines": 38, "files": 3}, "description": "No need to replicate Query suffix when in query namespace", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["7c4f94ecdb38f0e91c7ee52f579c0ea148afcc7d"], "committed_date": "2014-03-07T15:19:01", "authored_date": "2014-03-07T15:19:01", }, "_index": "git", }, { "_id": "7c4f94ecdb38f0e91c7ee52f579c0ea148afcc7d", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["elasticsearch_dsl/utils.py"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 2, "insertions": 3, "lines": 5, "files": 1}, "description": "Ask forgiveness, not permission", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["c10793c2ca43688195e415b25b674ff34d58eaff"], "committed_date": "2014-03-07T15:13:22", "authored_date": "2014-03-07T15:13:22", }, "_index": "git", }, { "_id": "c10793c2ca43688195e415b25b674ff34d58eaff", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/utils.py", "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 24, "insertions": 27, "lines": 51, "files": 3}, "description": "Extract DSL object registration to DslMeta", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["d8867fdb17fcf4c696657740fa08d29c36adc6ec"], "committed_date": "2014-03-07T15:12:13", "authored_date": "2014-03-07T15:10:31", }, "_index": "git", }, { "_id": "d8867fdb17fcf4c696657740fa08d29c36adc6ec", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 13, "lines": 13, "files": 2}, "description": "Search.to_dict", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["2eb7cd980d917ed6f4a4dd8e246804f710ec5082"], "committed_date": "2014-03-07T02:58:33", "authored_date": "2014-03-07T02:58:33", }, "_index": "git", }, { "_id": "2eb7cd980d917ed6f4a4dd8e246804f710ec5082", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/search.py", "test_elasticsearch_dsl/test_search.py", ], "committer": {"name": "Honza 
Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 113, "lines": 113, "files": 2}, "description": "Basic Search object", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["11708576f9118e0dbf27ae1f8a7b799cf281b511"], "committed_date": "2014-03-06T21:02:03", "authored_date": "2014-03-06T21:01:05", }, "_index": "git", }, { "_id": "11708576f9118e0dbf27ae1f8a7b799cf281b511", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 13, "lines": 13, "files": 2}, "description": "MatchAll query + anything is anything", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["1dc496e5c7c1b2caf290df477fca2db61ebe37e0"], "committed_date": "2014-03-06T20:40:39", "authored_date": "2014-03-06T20:39:52", }, "_index": "git", }, { "_id": "1dc496e5c7c1b2caf290df477fca2db61ebe37e0", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 53, "lines": 53, "files": 2}, "description": "From_dict, Q(dict) and bool query parses it's subqueries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["d407f99d1959b7b862a541c066d9fd737ce913f3"], "committed_date": "2014-03-06T20:24:30", "authored_date": "2014-03-06T20:24:30", }, "_index": "git", }, { "_id": "d407f99d1959b7b862a541c066d9fd737ce913f3", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": ["CONTRIBUTING.md", "README.rst"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 6, "insertions": 21, "lines": 27, "files": 2}, "description": "Housekeeping - licence and updated generic CONTRIBUTING.md", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["277e8ecc7395754d1ba1f2411ec32337a3e9d73f"], "committed_date": "2014-03-05T16:21:44", "authored_date": "2014-03-05T16:21:44", }, "_index": "git", }, { "_id": "277e8ecc7395754d1ba1f2411ec32337a3e9d73f", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "setup.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 59, "lines": 59, "files": 3}, "description": "Automatic query registration and Q function", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["8f1e34bd8f462fec50bcc10971df2d57e2986604"], "committed_date": "2014-03-05T16:18:52", "authored_date": "2014-03-05T16:18:52", }, "_index": "git", }, { "_id": "8f1e34bd8f462fec50bcc10971df2d57e2986604", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/query.py", "test_elasticsearch_dsl/test_query.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 54, "lines": 54, "files": 
2}, "description": "Initial implementation of match and bool queries", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["fcff47ddcc6d08be5739d03dd30f504fb9db2608"], "committed_date": "2014-03-05T15:55:06", "authored_date": "2014-03-05T15:55:06", }, "_index": "git", }, { "_id": "fcff47ddcc6d08be5739d03dd30f504fb9db2608", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "docs/Makefile", "CONTRIBUTING.md", "docs/conf.py", "LICENSE", "Changelog.rst", "docs/index.rst", "docs/Changelog.rst", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 692, "lines": 692, "files": 7}, "description": "Docs template", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["febe8127ae48fcc81778c0fb2d628f1bcc0a0350"], "committed_date": "2014-03-04T01:42:31", "authored_date": "2014-03-04T01:42:31", }, "_index": "git", }, { "_id": "febe8127ae48fcc81778c0fb2d628f1bcc0a0350", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [ "elasticsearch_dsl/__init__.py", "test_elasticsearch_dsl/run_tests.py", "setup.py", "README.rst", "test_elasticsearch_dsl/__init__.py", ], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 82, "lines": 82, "files": 5}, "description": "Empty project structure", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": ["2a8f1ce89760bfc72808f3945b539eae650acac9"], "committed_date": "2014-03-04T01:37:49", "authored_date": "2014-03-03T18:23:55", }, "_index": "git", }, { "_id": "2a8f1ce89760bfc72808f3945b539eae650acac9", "routing": "elasticsearch-dsl-py", "_source": { "commit_repo": {"name": "commit", "parent": "elasticsearch-dsl-py"}, "files": [".gitignore"], "committer": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "stats": {"deletions": 0, "insertions": 9, "lines": 9, "files": 1}, "description": "Initial commit, .gitignore", "author": {"name": "Honza Kr\xe1l", "email": "honza.kral@gmail.com"}, "parent_shas": [], "committed_date": "2014-03-03T18:15:05", "authored_date": "2014-03-03T18:15:05", }, "_index": "git", }, ] def flatten_doc(d: Dict[str, Any]) -> Dict[str, Any]: src = d["_source"].copy() del src["commit_repo"] return {"_index": "flat-git", "_id": d["_id"], "_source": src} FLAT_DATA = [flatten_doc(d) for d in DATA if "routing" in d] def create_test_git_data(d: Dict[str, Any]) -> Dict[str, Any]: src = d["_source"].copy() return { "_index": "test-git", "routing": "elasticsearch-dsl-py", "_id": d["_id"], "_source": src, } TEST_GIT_DATA = [create_test_git_data(d) for d in DATA] python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/000077500000000000000000000000001506101734100316025ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/__init__.py000066400000000000000000000014231506101734100337130ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async/000077500000000000000000000000001506101734100330565ustar00rootroot00000000000000__init__.py000066400000000000000000000014231506101734100351100ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. test_alias_migration.py000066400000000000000000000050611506101734100375540ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
import pytest from elasticsearch import AsyncElasticsearch from ..async_examples import alias_migration from ..async_examples.alias_migration import ALIAS, PATTERN, BlogPost, migrate @pytest.mark.asyncio async def test_alias_migration(async_write_client: AsyncElasticsearch) -> None: # create the index await alias_migration.setup() # verify that template, index, and alias have been set up assert await async_write_client.indices.exists_index_template(name=ALIAS) assert await async_write_client.indices.exists(index=PATTERN) assert await async_write_client.indices.exists_alias(name=ALIAS) indices = await async_write_client.indices.get(index=PATTERN) assert len(indices) == 1 index_name, _ = indices.popitem() # which means we can now save a document with open(__file__) as f: bp = BlogPost( _id=0, title="Hello World!", tags=["testing", "dummy"], content=f.read(), published=None, ) await bp.save(refresh=True) assert await BlogPost.search().count() == 1 # _matches works, which means we get a BlogPost instance bp = (await BlogPost.search().execute())[0] assert isinstance(bp, BlogPost) assert not bp.is_published() assert "0" == bp.meta.id # create new index await migrate() indices = await async_write_client.indices.get(index=PATTERN) assert 2 == len(indices) alias = await async_write_client.indices.get(index=ALIAS) assert 1 == len(alias) assert index_name not in alias # data has been moved properly assert await BlogPost.search().count() == 1 # _matches works, which means we get a BlogPost instance bp = (await BlogPost.search().execute())[0] assert isinstance(bp, BlogPost) assert "0" == bp.meta.id test_completion.py000066400000000000000000000026201506101734100365610ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import AsyncElasticsearch from ..async_examples.completion import Person @pytest.mark.asyncio async def test_person_suggests_on_all_variants_of_name( async_write_client: AsyncElasticsearch, ) -> None: await Person.init(using=async_write_client) await Person(_id=None, name="Honza Král", popularity=42).save(refresh=True) s = Person.search().suggest("t", "kra", completion={"field": "suggest"}) response = await s.execute() opts = response.suggest["t"][0].options assert 1 == len(opts) assert opts[0]._score == 42 assert opts[0]._source.name == "Honza Král" test_composite_aggs.py000066400000000000000000000034221506101734100374140ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. 
Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import A, AsyncSearch from ..async_examples.composite_agg import scan_aggs @pytest.mark.asyncio async def test_scan_aggs_exhausts_all_files( async_data_client: AsyncElasticsearch, ) -> None: s = AsyncSearch(index="flat-git") key_aggs = [{"files": A("terms", field="files")}] file_list = [f async for f in scan_aggs(s, key_aggs)] assert len(file_list) == 26 @pytest.mark.asyncio async def test_scan_aggs_with_multiple_aggs( async_data_client: AsyncElasticsearch, ) -> None: s = AsyncSearch(index="flat-git") key_aggs = [ {"files": A("terms", field="files")}, { "months": A( "date_histogram", field="committed_date", calendar_interval="month" ) }, ] file_list = [ f async for f in scan_aggs( s, key_aggs, {"first_seen": A("min", field="committed_date")} ) ] assert len(file_list) == 47 test_parent_child.py000066400000000000000000000065101506101734100370460ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime import pytest import pytest_asyncio from elasticsearch import AsyncElasticsearch from elasticsearch.dsl import Q from ..async_examples.parent_child import Answer, Comment, Question, User, setup honza = User( id=42, signed_up=datetime(2013, 4, 3), username="honzakral", email="honza@elastic.co", location="Prague", ) nick = User( id=47, signed_up=datetime(2017, 4, 3), username="fxdgear", email="nick.lang@elastic.co", location="Colorado", ) @pytest_asyncio.fixture async def question(async_write_client: AsyncElasticsearch) -> Question: await setup() assert await async_write_client.indices.exists_index_template(name="base") # create a question object q = Question( _id=1, author=nick, tags=["elasticsearch", "python"], title="How do I use elasticsearch from Python?", body=""" I want to use elasticsearch, how do I do it from Python? 
""", created=None, question_answer=None, comments=[], ) await q.save() return q @pytest.mark.asyncio async def test_comment( async_write_client: AsyncElasticsearch, question: Question ) -> None: await question.add_comment(nick, "Just use elasticsearch-py") q = await Question.get(1) # type: ignore[arg-type] assert isinstance(q, Question) assert 1 == len(q.comments) c = q.comments[0] assert isinstance(c, Comment) assert c.author.username == "fxdgear" @pytest.mark.asyncio async def test_question_answer( async_write_client: AsyncElasticsearch, question: Question ) -> None: a = await question.add_answer(honza, "Just use `elasticsearch-py`!") assert isinstance(a, Answer) # refresh the index so we can search right away await Question._index.refresh() # we can now fetch answers from elasticsearch answers = await question.get_answers() assert 1 == len(answers) assert isinstance(answers[0], Answer) search = Question.search().query( "has_child", type="answer", inner_hits={}, query=Q("term", author__username__keyword="honzakral"), ) response = await search.execute() assert 1 == len(response.hits) q = response.hits[0] assert isinstance(q, Question) assert 1 == len(q.meta.inner_hits.answer.hits) assert q.meta.inner_hits.answer.hits is await q.get_answers() a = q.meta.inner_hits.answer.hits[0] assert isinstance(a, Answer) assert isinstance(await a.get_question(), Question) assert (await a.get_question()).meta.id == "1" test_percolate.py000066400000000000000000000024521506101734100363710ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import AsyncElasticsearch from ..async_examples.percolate import BlogPost, setup @pytest.mark.asyncio async def test_post_gets_tagged_automatically( async_write_client: AsyncElasticsearch, ) -> None: await setup() bp = BlogPost(_id=47, content="nothing about snakes here!") bp_py = BlogPost(_id=42, content="something about Python here!") await bp.save() await bp_py.save() assert [] == bp.tags assert {"programming", "development", "python"} == set(bp_py.tags) test_vectors.py000066400000000000000000000045601506101734100361020ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_async# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys from hashlib import md5 from typing import Any, List, Tuple from unittest import SkipTest from unittest.mock import Mock, patch import pytest from elasticsearch import AsyncElasticsearch @pytest.mark.asyncio async def test_vector_search( async_write_client: AsyncElasticsearch, es_version: Tuple[int, ...] ) -> None: # this test only runs on Elasticsearch >= 8.11 because the example uses # a dense vector without specifying an explicit size if es_version < (8, 11): raise SkipTest("This test requires Elasticsearch 8.11 or newer") class MockSentenceTransformer: def __init__(self, model: Any): pass def encode(self, text: str) -> List[float]: vector = [int(ch) for ch in md5(text.encode()).digest()] total = sum(vector) return [float(v) / total for v in vector] def mock_nltk_tokenize(content: str): return content.split("\n") # mock sentence_transformers and nltk, because they are quite big and # irrelevant for testing the example logic with patch.dict( sys.modules, { "sentence_transformers": Mock(SentenceTransformer=MockSentenceTransformer), "nltk": Mock(sent_tokenize=mock_nltk_tokenize), }, ): # import the example after the dependencies are mocked from ..async_examples import vectors await vectors.create() await vectors.WorkplaceDoc._index.refresh() results = await (await vectors.search("Welcome to our team!")).execute() assert results[0].name == "Intellectual Property Policy" python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync/000077500000000000000000000000001506101734100327155ustar00rootroot00000000000000__init__.py000066400000000000000000000014231506101734100347470ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. test_alias_migration.py000066400000000000000000000046321506101734100374160ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import Elasticsearch from ..examples import alias_migration from ..examples.alias_migration import ALIAS, PATTERN, BlogPost, migrate @pytest.mark.sync def test_alias_migration(write_client: Elasticsearch) -> None: # create the index alias_migration.setup() # verify that template, index, and alias have been set up assert write_client.indices.exists_index_template(name=ALIAS) assert write_client.indices.exists(index=PATTERN) assert write_client.indices.exists_alias(name=ALIAS) indices = write_client.indices.get(index=PATTERN) assert len(indices) == 1 index_name, _ = indices.popitem() # which means we can now save a document with open(__file__) as f: bp = BlogPost( _id=0, title="Hello World!", tags=["testing", "dummy"], content=f.read(), published=None, ) bp.save(refresh=True) assert BlogPost.search().count() == 1 # _matches works, which means we get a BlogPost instance bp = (BlogPost.search().execute())[0] assert isinstance(bp, BlogPost) assert not bp.is_published() assert "0" == bp.meta.id # create new index migrate() indices = write_client.indices.get(index=PATTERN) assert 2 == len(indices) alias = write_client.indices.get(index=ALIAS) assert 1 == len(alias) assert index_name not in alias # data has been moved properly assert BlogPost.search().count() == 1 # _matches works, which means we get a BlogPost instance bp = (BlogPost.search().execute())[0] assert isinstance(bp, BlogPost) assert "0" == bp.meta.id test_completion.py000066400000000000000000000025311506101734100364210ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
import pytest from elasticsearch import Elasticsearch from ..examples.completion import Person @pytest.mark.sync def test_person_suggests_on_all_variants_of_name( write_client: Elasticsearch, ) -> None: Person.init(using=write_client) Person(_id=None, name="Honza Král", popularity=42).save(refresh=True) s = Person.search().suggest("t", "kra", completion={"field": "suggest"}) response = s.execute() opts = response.suggest["t"][0].options assert 1 == len(opts) assert opts[0]._score == 42 assert opts[0]._source.name == "Honza Král" test_composite_aggs.py000066400000000000000000000033041506101734100372520ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import Elasticsearch from elasticsearch.dsl import A, Search from ..examples.composite_agg import scan_aggs @pytest.mark.sync def test_scan_aggs_exhausts_all_files( data_client: Elasticsearch, ) -> None: s = Search(index="flat-git") key_aggs = [{"files": A("terms", field="files")}] file_list = [f for f in scan_aggs(s, key_aggs)] assert len(file_list) == 26 @pytest.mark.sync def test_scan_aggs_with_multiple_aggs( data_client: Elasticsearch, ) -> None: s = Search(index="flat-git") key_aggs = [ {"files": A("terms", field="files")}, { "months": A( "date_histogram", field="committed_date", calendar_interval="month" ) }, ] file_list = [ f for f in scan_aggs( s, key_aggs, {"first_seen": A("min", field="committed_date")} ) ] assert len(file_list) == 47 test_parent_child.py000066400000000000000000000062141506101734100367060ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
from datetime import datetime import pytest from elasticsearch import Elasticsearch from elasticsearch.dsl import Q from ..examples.parent_child import Answer, Comment, Question, User, setup honza = User( id=42, signed_up=datetime(2013, 4, 3), username="honzakral", email="honza@elastic.co", location="Prague", ) nick = User( id=47, signed_up=datetime(2017, 4, 3), username="fxdgear", email="nick.lang@elastic.co", location="Colorado", ) @pytest.fixture def question(write_client: Elasticsearch) -> Question: setup() assert write_client.indices.exists_index_template(name="base") # create a question object q = Question( _id=1, author=nick, tags=["elasticsearch", "python"], title="How do I use elasticsearch from Python?", body=""" I want to use elasticsearch, how do I do it from Python? """, created=None, question_answer=None, comments=[], ) q.save() return q @pytest.mark.sync def test_comment(write_client: Elasticsearch, question: Question) -> None: question.add_comment(nick, "Just use elasticsearch-py") q = Question.get(1) # type: ignore[arg-type] assert isinstance(q, Question) assert 1 == len(q.comments) c = q.comments[0] assert isinstance(c, Comment) assert c.author.username == "fxdgear" @pytest.mark.sync def test_question_answer(write_client: Elasticsearch, question: Question) -> None: a = question.add_answer(honza, "Just use `elasticsearch-py`!") assert isinstance(a, Answer) # refresh the index so we can search right away Question._index.refresh() # we can now fetch answers from elasticsearch answers = question.get_answers() assert 1 == len(answers) assert isinstance(answers[0], Answer) search = Question.search().query( "has_child", type="answer", inner_hits={}, query=Q("term", author__username__keyword="honzakral"), ) response = search.execute() assert 1 == len(response.hits) q = response.hits[0] assert isinstance(q, Question) assert 1 == len(q.meta.inner_hits.answer.hits) assert q.meta.inner_hits.answer.hits is q.get_answers() a = q.meta.inner_hits.answer.hits[0] assert isinstance(a, Answer) assert isinstance(a.get_question(), Question) assert (a.get_question()).meta.id == "1" test_percolate.py000066400000000000000000000023711506101734100362300ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
import pytest from elasticsearch import Elasticsearch from ..examples.percolate import BlogPost, setup @pytest.mark.sync def test_post_gets_tagged_automatically( write_client: Elasticsearch, ) -> None: setup() bp = BlogPost(_id=47, content="nothing about snakes here!") bp_py = BlogPost(_id=42, content="something about Python here!") bp.save() bp_py.save() assert [] == bp.tags assert {"programming", "development", "python"} == set(bp_py.tags) test_vectors.py000066400000000000000000000044711506101734100357420ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/_sync# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys from hashlib import md5 from typing import Any, List, Tuple from unittest import SkipTest from unittest.mock import Mock, patch import pytest from elasticsearch import Elasticsearch @pytest.mark.sync def test_vector_search( write_client: Elasticsearch, es_version: Tuple[int, ...] ) -> None: # this test only runs on Elasticsearch >= 8.11 because the example uses # a dense vector without specifying an explicit size if es_version < (8, 11): raise SkipTest("This test requires Elasticsearch 8.11 or newer") class MockSentenceTransformer: def __init__(self, model: Any): pass def encode(self, text: str) -> List[float]: vector = [int(ch) for ch in md5(text.encode()).digest()] total = sum(vector) return [float(v) / total for v in vector] def mock_nltk_tokenize(content: str): return content.split("\n") # mock sentence_transformers and nltk, because they are quite big and # irrelevant for testing the example logic with patch.dict( sys.modules, { "sentence_transformers": Mock(SentenceTransformer=MockSentenceTransformer), "nltk": Mock(sent_tokenize=mock_nltk_tokenize), }, ): # import the example after the dependencies are mocked from ..examples import vectors vectors.create() vectors.WorkplaceDoc._index.refresh() results = (vectors.search("Welcome to our team!")).execute() assert results[0].name == "Intellectual Property Policy" python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/async_examples000077700000000000000000000000001506101734100412472../../../../examples/dsl/asyncustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_integration/test_examples/examples000077700000000000000000000000001506101734100367352../../../../examples/dslustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_package.py000066400000000000000000000016351506101734100263540ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import elasticsearch.dsl def test__all__is_sorted() -> None: assert elasticsearch.dsl.__all__ == sorted(elasticsearch.dsl.__all__) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_query.py000066400000000000000000000441201506101734100261220ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from pytest import raises from elasticsearch.dsl import function, query, utils def test_empty_Q_is_match_all() -> None: q = query.Q() assert isinstance(q, query.MatchAll) assert query.MatchAll() == q def test_combined_fields_to_dict() -> None: assert { "combined_fields": { "query": "this is a test", "fields": ["name", "body", "description"], "operator": "and", }, } == query.CombinedFields( query="this is a test", fields=["name", "body", "description"], operator="and", ).to_dict() def test_combined_fields_to_dict_extra() -> None: assert { "combined_fields": { "query": "this is a test", "fields": ["name", "body^2"], "operator": "or", }, } == query.CombinedFields( query="this is a test", fields=["name", "body^2"], operator="or", ).to_dict() def test_match_to_dict() -> None: assert {"match": {"f": "value"}} == query.Match(f="value").to_dict() def test_match_to_dict_extra() -> None: assert {"match": {"f": "value", "boost": 2}} == query.Match( f="value", boost=2 ).to_dict() def test_fuzzy_to_dict() -> None: assert {"fuzzy": {"f": "value"}} == query.Fuzzy(f="value").to_dict() def test_prefix_to_dict() -> None: assert {"prefix": {"f": "value"}} == query.Prefix(f="value").to_dict() def test_term_to_dict() -> None: assert {"term": {"_type": "article"}} == query.Term(_type="article").to_dict() def test_terms_to_dict() -> None: assert {"terms": {"_type": ["article", "section"]}} == query.Terms( _type=["article", "section"] ).to_dict() assert {"terms": {"_type": ["article", "section"], "boost": 1.1}} == query.Terms( _type=("article", "section"), boost=1.1 ).to_dict() assert {"terms": {"_type": "article", "boost": 1.1}} == query.Terms( _type="article", boost=1.1 ).to_dict() assert { "terms": {"_id": {"index": "my-other-index", "id": "my-id"}, "boost": 1.1} } == query.Terms( _id={"index": "my-other-index", "id": "my-id"}, boost=1.1 ).to_dict() def test_bool_to_dict() -> None: bool = query.Bool(must=[query.Match(f="value")], 
should=[]) assert {"bool": {"must": [{"match": {"f": "value"}}]}} == bool.to_dict() def test_dismax_to_dict() -> None: assert {"dis_max": {"queries": [{"term": {"_type": "article"}}]}} == query.DisMax( queries=[query.Term(_type="article")] ).to_dict() def test_bool_from_dict_issue_318() -> None: d = {"bool": {"must_not": {"match": {"field": "value"}}}} q = query.Q(d) assert q == ~query.Match(field="value") def test_repr() -> None: bool = query.Bool(must=[query.Match(f="value")], should=[]) assert "Bool(must=[Match(f='value')])" == repr(bool) def test_query_clone() -> None: bool = query.Bool( must=[query.Match(x=42)], should=[query.Match(g="v2")], must_not=[query.Match(title="value")], ) bool_clone = bool._clone() assert bool == bool_clone assert bool is not bool_clone def test_bool_converts_its_init_args_to_queries() -> None: q = query.Bool(must=[{"match": {"f": "value"}}]) # type: ignore assert len(q.must) == 1 assert q.must[0] == query.Match(f="value") def test_two_queries_make_a_bool() -> None: q1 = query.Match(f="value1") q2 = query.Match(message={"query": "this is a test", "operator": "and"}) q = q1 & q2 assert isinstance(q, query.Bool) assert [q1, q2] == q.must def test_other_and_bool_appends_other_to_must() -> None: q1 = query.Match(f="value1") qb = query.Bool() q = q1 & qb assert q is not qb assert q.must[0] == q1 def test_bool_and_other_appends_other_to_must() -> None: q1 = query.Match(f="value1") qb = query.Bool() q = qb & q1 assert q is not qb assert q.must[0] == q1 def test_bool_and_other_sets_min_should_match_if_needed() -> None: q1 = query.Q("term", category=1) q2 = query.Q( "bool", should=[query.Q("term", name="aaa"), query.Q("term", name="bbb")] ) q = q1 & q2 assert q == query.Bool( must=[q1], should=[query.Q("term", name="aaa"), query.Q("term", name="bbb")], minimum_should_match=1, ) def test_bool_with_different_minimum_should_match_should_not_be_combined() -> None: q1 = query.Q( "bool", minimum_should_match=2, should=[ query.Q("term", field="aa1"), query.Q("term", field="aa2"), query.Q("term", field="aa3"), query.Q("term", field="aa4"), ], ) q2 = query.Q( "bool", minimum_should_match=3, should=[ query.Q("term", field="bb1"), query.Q("term", field="bb2"), query.Q("term", field="bb3"), query.Q("term", field="bb4"), ], ) q3 = query.Q( "bool", minimum_should_match=4, should=[ query.Q("term", field="cc1"), query.Q("term", field="cc2"), query.Q("term", field="cc3"), query.Q("term", field="cc4"), ], ) q4 = q1 | q2 assert q4 == query.Bool(should=[q1, q2]) q5 = q1 | q2 | q3 assert q5 == query.Bool(should=[q1, q2, q3]) def test_empty_bool_has_min_should_match_0() -> None: assert 0 == query.Bool()._min_should_match def test_query_and_query_creates_bool() -> None: q1 = query.Match(f=42) q2 = query.Match(g=47) q = q1 & q2 assert isinstance(q, query.Bool) assert q.must == [q1, q2] def test_match_all_and_query_equals_other() -> None: q1 = query.Match(f=42) q2 = query.MatchAll() q = q1 & q2 assert q1 == q def test_not_match_all_is_match_none() -> None: q = query.MatchAll() assert ~q == query.MatchNone() def test_not_match_none_is_match_all() -> None: q = query.MatchNone() assert ~q == query.MatchAll() def test_invert_empty_bool_is_match_none() -> None: q = query.Bool() assert ~q == query.MatchNone() def test_match_none_or_query_equals_query() -> None: q1 = query.Match(f=42) q2 = query.MatchNone() assert q1 | q2 == query.Match(f=42) def test_match_none_and_query_equals_match_none() -> None: q1 = query.Match(f=42) q2 = query.MatchNone() assert q1 & q2 == query.MatchNone() def 
test_bool_and_bool() -> None: qt1, qt2, qt3 = query.Match(f=1), query.Match(f=2), query.Match(f=3) q1 = query.Bool(must=[qt1], should=[qt2]) q2 = query.Bool(must_not=[qt3]) assert q1 & q2 == query.Bool( must=[qt1], must_not=[qt3], should=[qt2], minimum_should_match=0 ) q1 = query.Bool(must=[qt1], should=[qt1, qt2]) q2 = query.Bool(should=[qt3]) assert q1 & q2 == query.Bool( must=[qt1, qt3], should=[qt1, qt2], minimum_should_match=0 ) def test_bool_and_bool_with_min_should_match() -> None: qt1, qt2 = query.Match(f=1), query.Match(f=2) q1 = query.Q("bool", minimum_should_match=1, should=[qt1]) q2 = query.Q("bool", minimum_should_match=1, should=[qt2]) assert query.Q("bool", must=[qt1, qt2]) == q1 & q2 def test_negative_min_should_match() -> None: qt1, qt2 = query.Match(f=1), query.Match(f=2) q1 = query.Q("bool", minimum_should_match=-2, should=[qt1]) q2 = query.Q("bool", minimum_should_match=1, should=[qt2]) with raises(ValueError): q1 & q2 with raises(ValueError): q2 & q1 def test_percentage_min_should_match() -> None: qt1, qt2 = query.Match(f=1), query.Match(f=2) q1 = query.Q("bool", minimum_should_match="50%", should=[qt1]) q2 = query.Q("bool", minimum_should_match=1, should=[qt2]) with raises(ValueError): q1 & q2 with raises(ValueError): q2 & q1 def test_inverted_query_becomes_bool_with_must_not() -> None: q = query.Match(f=42) assert ~q == query.Bool(must_not=[query.Match(f=42)]) def test_inverted_query_with_must_not_become_should() -> None: q = query.Q("bool", must_not=[query.Q("match", f=1), query.Q("match", f=2)]) assert ~q == query.Q("bool", should=[query.Q("match", f=1), query.Q("match", f=2)]) def test_inverted_query_with_must_and_must_not() -> None: q = query.Q( "bool", must=[query.Q("match", f=3), query.Q("match", f=4)], must_not=[query.Q("match", f=1), query.Q("match", f=2)], ) print((~q).to_dict()) assert ~q == query.Q( "bool", should=[ # negation of must query.Q("bool", must_not=[query.Q("match", f=3)]), query.Q("bool", must_not=[query.Q("match", f=4)]), # negation of must_not query.Q("match", f=1), query.Q("match", f=2), ], ) def test_double_invert_returns_original_query() -> None: q = query.Match(f=42) assert q == ~~q def test_bool_query_gets_inverted_internally() -> None: q = query.Bool(must_not=[query.Match(f=42)], must=[query.Match(g="v")]) assert ~q == query.Bool( should=[ # negating must query.Bool(must_not=[query.Match(g="v")]), # negating must_not query.Match(f=42), ] ) def test_match_all_or_something_is_match_all() -> None: q1 = query.MatchAll() q2 = query.Match(f=42) assert (q1 | q2) == query.MatchAll() assert (q2 | q1) == query.MatchAll() def test_or_produces_bool_with_should() -> None: q1 = query.Match(f=42) q2 = query.Match(g="v") q = q1 | q2 assert q == query.Bool(should=[q1, q2]) def test_or_bool_doesnt_loop_infinitely_issue_37() -> None: q = query.Match(f=42) | ~query.Match(f=47) assert q == query.Bool( should=[query.Bool(must_not=[query.Match(f=47)]), query.Match(f=42)] ) def test_or_bool_doesnt_loop_infinitely_issue_96() -> None: q = ~query.Match(f=42) | ~query.Match(f=47) assert q == query.Bool( should=[ query.Bool(must_not=[query.Match(f=42)]), query.Bool(must_not=[query.Match(f=47)]), ] ) def test_bool_will_append_another_query_with_or() -> None: qb = query.Bool(should=[query.Match(f="v"), query.Match(f="v2")]) q = query.Match(g=42) assert (q | qb) == query.Bool(should=[query.Match(f="v"), query.Match(f="v2"), q]) def test_bool_queries_with_only_should_get_concatenated() -> None: q1 = query.Bool(should=[query.Match(f=1), query.Match(f=2)]) q2 = 
query.Bool(should=[query.Match(f=3), query.Match(f=4)]) assert (q1 | q2) == query.Bool( should=[query.Match(f=1), query.Match(f=2), query.Match(f=3), query.Match(f=4)] ) def test_two_bool_queries_append_one_to_should_if_possible() -> None: q1 = query.Bool(should=[query.Match(f="v")]) q2 = query.Bool(must=[query.Match(f="v")]) assert (q1 | q2) == query.Bool( should=[query.Match(f="v"), query.Bool(must=[query.Match(f="v")])] ) assert (q2 | q1) == query.Bool( should=[query.Match(f="v"), query.Bool(must=[query.Match(f="v")])] ) def test_queries_are_registered() -> None: assert "match" in query.Query._classes assert query.Query._classes["match"] is query.Match def test_defining_query_registers_it() -> None: class MyQuery(query.Query): name = "my_query" assert "my_query" in query.Query._classes assert query.Query._classes["my_query"] is MyQuery def test_Q_passes_query_through() -> None: q = query.Match(f="value1") assert query.Q(q) is q def test_Q_constructs_query_by_name() -> None: q = query.Q("match", f="value") assert isinstance(q, query.Match) assert {"f": "value"} == q._params def test_Q_translates_double_underscore_to_dots_in_param_names() -> None: q = query.Q("match", comment__author="honza") assert {"comment.author": "honza"} == q._params def test_Q_doesn_translate_double_underscore_to_dots_in_param_names() -> None: q = query.Q("match", comment__author="honza", _expand__to_dot=False) assert {"comment__author": "honza"} == q._params def test_Q_constructs_simple_query_from_dict() -> None: q = query.Q({"match": {"f": "value"}}) assert isinstance(q, query.Match) assert {"f": "value"} == q._params def test_Q_constructs_compound_query_from_dict() -> None: q = query.Q({"bool": {"must": [{"match": {"f": "value"}}]}}) assert q == query.Bool(must=[query.Match(f="value")]) def test_Q_raises_error_when_passed_in_dict_and_params() -> None: with raises(Exception): # Ignore types as it's not a valid call query.Q({"match": {"f": "value"}}, f="value") # type: ignore[call-overload] def test_Q_raises_error_when_passed_in_query_and_params() -> None: q = query.Match(f="value1") with raises(Exception): # Ignore types as it's not a valid call signature query.Q(q, f="value") # type: ignore[call-overload] def test_Q_raises_error_on_unknown_query() -> None: with raises(Exception): query.Q("not a query", f="value") def test_match_all_and_anything_is_anything() -> None: q = query.MatchAll() s = query.Match(f=42) assert q & s == s assert s & q == s def test_function_score_with_functions() -> None: q = query.Q( "function_score", functions=[query.SF("script_score", script="doc['comment_count'] * _score")], ) assert { "function_score": { "functions": [{"script_score": {"script": "doc['comment_count'] * _score"}}] } } == q.to_dict() def test_function_score_with_no_function_is_boost_factor() -> None: q = query.Q( "function_score", functions=[query.SF({"weight": 20, "filter": query.Q("term", f=42)})], ) assert { "function_score": {"functions": [{"filter": {"term": {"f": 42}}, "weight": 20}]} } == q.to_dict() def test_function_score_to_dict() -> None: q = query.Q( "function_score", query=query.Q("match", title="python"), functions=[ query.SF("random_score"), query.SF( "field_value_factor", field="comment_count", filter=query.Q("term", tags="python"), ), ], ) d = { "function_score": { "query": {"match": {"title": "python"}}, "functions": [ {"random_score": {}}, { "filter": {"term": {"tags": "python"}}, "field_value_factor": {"field": "comment_count"}, }, ], } } assert d == q.to_dict() def 
test_function_score_class_based_to_dict() -> None: q = query.FunctionScore( query=query.Match(title="python"), functions=[ function.RandomScore(), function.FieldValueFactor( field="comment_count", filter=query.Term(tags="python"), ), ], ) d = { "function_score": { "query": {"match": {"title": "python"}}, "functions": [ {"random_score": {}}, { "filter": {"term": {"tags": "python"}}, "field_value_factor": {"field": "comment_count"}, }, ], } } assert d == q.to_dict() def test_function_score_with_single_function() -> None: d = { "function_score": { "filter": {"term": {"tags": "python"}}, "script_score": {"script": "doc['comment_count'] * _score"}, } } q = query.Q(d) assert isinstance(q, query.FunctionScore) assert isinstance(q.filter, query.Term) assert len(q.functions) == 1 sf = q.functions[0] assert isinstance(sf, function.ScriptScore) assert "doc['comment_count'] * _score" == sf.script def test_function_score_from_dict() -> None: d = { "function_score": { "filter": {"term": {"tags": "python"}}, "functions": [ { "filter": {"terms": {"tags": "python"}}, "script_score": {"script": "doc['comment_count'] * _score"}, }, {"boost_factor": 6}, ], } } q = query.Q(d) assert isinstance(q, query.FunctionScore) assert isinstance(q.filter, query.Term) assert len(q.functions) == 2 sf = q.functions[0] assert isinstance(sf, function.ScriptScore) assert isinstance(sf.filter, query.Terms) sf = q.functions[1] assert isinstance(sf, function.BoostFactor) assert 6 == sf.value assert {"boost_factor": 6} == sf.to_dict() def test_script_score() -> None: d = { "script_score": { "query": {"match_all": {}}, "script": {"source": "...", "params": {}}, } } q = query.Q(d) assert isinstance(q, query.ScriptScore) assert isinstance(q.query, query.MatchAll) assert q.script == {"source": "...", "params": {}} assert q.to_dict() == d def test_expand_double_underscore_to_dot_setting() -> None: q = query.Term(comment__count=2) assert q.to_dict() == {"term": {"comment.count": 2}} utils.EXPAND__TO_DOT = False q = query.Term(comment__count=2) assert q.to_dict() == {"term": {"comment__count": 2}} utils.EXPAND__TO_DOT = True def test_knn_query() -> None: q = query.Knn(field="image-vector", query_vector=[-5, 9, -12], num_candidates=10) assert q.to_dict() == { "knn": { "field": "image-vector", "query_vector": [-5, 9, -12], "num_candidates": 10, } } python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_result.py000066400000000000000000000151341506101734100262760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import pickle
from datetime import date
from typing import Any, Dict

from pytest import fixture, raises

from elasticsearch.dsl import Date, Document, Object, Search, response
from elasticsearch.dsl.aggs import Terms
from elasticsearch.dsl.response.aggs import AggResponse, Bucket, BucketData
from elasticsearch.dsl.utils import AttrDict


@fixture
def agg_response(aggs_search: Search, aggs_data: Dict[str, Any]) -> response.Response:
    return response.Response(aggs_search, aggs_data)


def test_agg_response_is_pickleable(agg_response: response.Response) -> None:
    agg_response.hits
    r = pickle.loads(pickle.dumps(agg_response))
    assert r == agg_response
    assert r._search == agg_response._search
    assert r.hits == agg_response.hits


def test_response_is_pickleable(dummy_response: Dict[str, Any]) -> None:
    res = response.Response(Search(), dummy_response.body)  # type: ignore[attr-defined]
    res.hits
    r = pickle.loads(pickle.dumps(res))
    assert r == res
    assert r._search == res._search
    assert r.hits == res.hits


def test_hit_is_pickleable(dummy_response: Dict[str, Any]) -> None:
    res = response.Response(Search(), dummy_response)
    hits = pickle.loads(pickle.dumps(res.hits))
    assert hits == res.hits
    assert hits[0].meta == res.hits[0].meta


def test_response_stores_search(dummy_response: Dict[str, Any]) -> None:
    s = Search()
    r = response.Response(s, dummy_response)
    assert r._search is s


def test_attribute_error_in_hits_is_not_hidden(dummy_response: Dict[str, Any]) -> None:
    def f(hit: AttrDict[Any]) -> Any:
        raise AttributeError()

    s = Search().doc_type(employee=f)
    r = response.Response(s, dummy_response)
    with raises(TypeError):
        r.hits


def test_interactive_helpers(dummy_response: Dict[str, Any]) -> None:
    res = response.Response(Search(), dummy_response)
    hits = res.hits
    h = hits[0]

    rhits = (
        "[<Hit(test-index/elasticsearch): {}>, <Hit(test-index/42): {}...}}>, "
        "<Hit(test-index/47): {}...}}>, <Hit(test-index/53): {{}}>]"
    ).format(
        repr(dummy_response["hits"]["hits"][0]["_source"]),
        repr(dummy_response["hits"]["hits"][1]["_source"])[:60],
        repr(dummy_response["hits"]["hits"][2]["_source"])[:60],
    )

    assert res
    assert f"<Response: {rhits}>" == repr(res)
    assert rhits == repr(hits)
    assert {"meta", "city", "name"} == set(dir(h))
    assert "<Hit(test-index/elasticsearch): %r>" % dummy_response["hits"]["hits"][0][
        "_source"
    ] == repr(h)


def test_empty_response_is_false(dummy_response: Dict[str, Any]) -> None:
    dummy_response["hits"]["hits"] = []
    res = response.Response(Search(), dummy_response)
    assert not res


def test_len_response(dummy_response: Dict[str, Any]) -> None:
    res = response.Response(Search(), dummy_response)
    assert len(res) == 4


def test_iterating_over_response_gives_you_hits(dummy_response: Dict[str, Any]) -> None:
    res = response.Response(Search(), dummy_response)
    hits = list(h for h in res)

    assert res.success()
    assert 123 == res.took
    assert 4 == len(hits)
    assert all(isinstance(h, response.Hit) for h in hits)
    h = hits[0]

    assert "test-index" == h.meta.index
    assert "company" == h.meta.doc_type
    assert "elasticsearch" == h.meta.id
    assert 12 == h.meta.score

    assert hits[1].meta.routing == "elasticsearch"


def test_hits_get_wrapped_to_contain_additional_attrs(
    dummy_response: Dict[str, Any],
) -> None:
    res = response.Response(Search(), dummy_response)
    hits = res.hits

    assert 123 == hits.total  # type: ignore[attr-defined]
    assert 12.0 == hits.max_score  # type: ignore[attr-defined]


def test_hits_provide_dot_and_bracket_access_to_attrs(
    dummy_response: Dict[str, Any],
) -> None:
    res = response.Response(Search(), dummy_response)
    h = res.hits[0]

    assert "Elasticsearch" == h.name
    assert "Elasticsearch" == h["name"]

    assert "Honza" == res.hits[2].name.first

    with raises(KeyError):
        h["not_there"]

    with
raises(AttributeError): h.not_there def test_slicing_on_response_slices_on_hits(dummy_response: Dict[str, Any]) -> None: res = response.Response(Search(), dummy_response) assert res[0] is res.hits[0] assert res[::-1] == res.hits[::-1] def test_aggregation_base(agg_response: response.Response) -> None: assert agg_response.aggs is agg_response.aggregations assert isinstance(agg_response.aggs, response.AggResponse) def test_metric_agg_works(agg_response: response.Response) -> None: assert 25052.0 == agg_response.aggs.sum_lines.value def test_aggregations_can_be_iterated_over(agg_response: response.Response) -> None: aggs = [a for a in agg_response.aggs] assert len(aggs) == 3 assert all(map(lambda a: isinstance(a, AggResponse), aggs)) def test_aggregations_can_be_retrieved_by_name( agg_response: response.Response, aggs_search: Search ) -> None: a = agg_response.aggs["popular_files"] assert isinstance(a, BucketData) assert isinstance(a._meta["aggs"], Terms) assert a._meta["aggs"] is aggs_search.aggs.aggs["popular_files"] def test_bucket_response_can_be_iterated_over(agg_response: response.Response) -> None: popular_files = agg_response.aggregations.popular_files buckets = [b for b in popular_files] assert all(isinstance(b, Bucket) for b in buckets) assert buckets == popular_files.buckets def test_bucket_keys_get_deserialized( aggs_data: Dict[str, Any], aggs_search: Search ) -> None: class Commit(Document): info = Object(properties={"committed_date": Date()}) class Index: name = "test-commit" aggs_search = aggs_search.doc_type(Commit) agg_response = response.Response(aggs_search, aggs_data) per_month = agg_response.aggregations.per_month for b in per_month: assert isinstance(b.key, date) python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_utils.py000066400000000000000000000073371506101734100261260ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
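# Illustrative sketch (not part of the upstream suite): AttrDict layers
# attribute access over a plain dict, wrapping nested containers on the fly,
# and `to_dict()` returns the raw structure. Values here are invented.
def _example_attrdict_roundtrip() -> None:
    from elasticsearch.dsl import utils

    d = utils.AttrDict({"user": {"name": "jane"}, "tags": ["a", "b"]})
    assert d.user.name == "jane"  # nested dict is wrapped as AttrDict on access
    assert d.to_dict() == {"user": {"name": "jane"}, "tags": ["a", "b"]}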
import pickle from typing import Any, Dict, Tuple from pytest import raises from elasticsearch.dsl import Q, serializer, utils def test_attrdict_pickle() -> None: ad: utils.AttrDict[str] = utils.AttrDict({}) pickled_ad = pickle.dumps(ad) assert ad == pickle.loads(pickled_ad) def test_attrlist_pickle() -> None: al = utils.AttrList[Any]([]) pickled_al = pickle.dumps(al) assert al == pickle.loads(pickled_al) def test_attrlist_slice() -> None: class MyAttrDict(utils.AttrDict[str]): pass l = utils.AttrList[Any]([{}, {}], obj_wrapper=MyAttrDict) assert isinstance(l[:][0], MyAttrDict) def test_attrlist_with_type_argument() -> None: a = utils.AttrList[str](["a", "b"]) assert list(a) == ["a", "b"] def test_attrdict_keys_items() -> None: a = utils.AttrDict({"a": {"b": 42, "c": 47}, "d": "e"}) assert list(a.keys()) == ["a", "d"] assert list(a.items()) == [("a", {"b": 42, "c": 47}), ("d", "e")] def test_attrdict_with_type_argument() -> None: a = utils.AttrDict[str]({"a": "b"}) assert list(a.keys()) == ["a"] assert list(a.items()) == [("a", "b")] def test_merge() -> None: a: utils.AttrDict[Any] = utils.AttrDict({"a": {"b": 42, "c": 47}}) b = {"a": {"b": 123, "d": -12}, "e": [1, 2, 3]} utils.merge(a, b) assert a == {"a": {"b": 123, "c": 47, "d": -12}, "e": [1, 2, 3]} def test_merge_conflict() -> None: data: Tuple[Dict[str, Any], ...] = ( {"a": 42}, {"a": {"b": 47}}, ) for d in data: utils.merge({"a": {"b": 42}}, d) with raises(ValueError): utils.merge({"a": {"b": 42}}, d, True) def test_attrdict_bool() -> None: d: utils.AttrDict[str] = utils.AttrDict({}) assert not d d.title = "Title" assert d def test_attrlist_items_get_wrapped_during_iteration() -> None: al = utils.AttrList([1, object(), [1], {}]) l = list(iter(al)) assert isinstance(l[2], utils.AttrList) assert isinstance(l[3], utils.AttrDict) def test_serializer_deals_with_Attr_versions() -> None: d = utils.AttrDict({"key": utils.AttrList([1, 2, 3])}) assert serializer.serializer.dumps(d) == serializer.serializer.dumps( {"key": [1, 2, 3]} ) def test_serializer_deals_with_objects_with_to_dict() -> None: class MyClass: def to_dict(self) -> int: return 42 assert serializer.serializer.dumps(MyClass()) == b"42" def test_recursive_to_dict() -> None: assert utils.recursive_to_dict({"k": [1, (1.0, {"v": Q("match", key="val")})]}) == { "k": [1, (1.0, {"v": {"match": {"key": "val"}}})] } def test_attrlist_to_list() -> None: l = utils.AttrList[Any]([{}, {}]).to_list() assert isinstance(l, list) assert l == [{}, {}] def test_attrdict_with_reserved_keyword() -> None: d = utils.AttrDict({"from": 10, "size": 20}) assert d.from_ == 10 assert d.size == 20 d = utils.AttrDict({}) d.from_ = 10 assert {"from": 10} == d.to_dict() python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_validation.py000066400000000000000000000101231506101734100271030ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime from typing import Any from pytest import raises from elasticsearch.dsl import ( Date, Document, InnerDoc, Integer, Nested, Object, Text, mapped_field, ) from elasticsearch.dsl.exceptions import ValidationException class Author(InnerDoc): name: str email: str def clean(self) -> None: if not self.name: raise ValidationException("name is missing") if not self.email: raise ValidationException("email is missing") elif self.name.lower() not in self.email: raise ValidationException("Invalid email!") class BlogPost(Document): authors = Nested(Author, required=True) created = Date() inner = Object() class BlogPostWithStatus(Document): published: bool = mapped_field(init=False) class AutoNowDate(Date): def clean(self, data: Any) -> Any: if data is None: data = datetime.now() return super().clean(data) class Log(Document): timestamp = AutoNowDate(required=True) data = Text() def test_required_int_can_be_0() -> None: class DT(Document): i = Integer(required=True) dt = DT(i=0) dt.full_clean() def test_required_field_cannot_be_empty_list() -> None: class DT(Document): i = Integer(required=True) dt = DT(i=[]) with raises(ValidationException): dt.full_clean() def test_validation_works_for_lists_of_values() -> None: class DT(Document): i = Date(required=True) dt = DT(i=[datetime.now(), "not date"]) with raises(ValidationException): dt.full_clean() dt = DT(i=[datetime.now(), datetime.now()]) dt.full_clean() def test_field_with_custom_clean() -> None: l = Log() l.full_clean() assert isinstance(l.timestamp, datetime) def test_empty_object() -> None: d = BlogPost(authors=[{"name": "Honza", "email": "honza@elastic.co"}]) d.inner = {} # type: ignore[assignment] d.full_clean() def test_missing_required_field_raises_validation_exception() -> None: d = BlogPost() with raises(ValidationException): d.full_clean() d = BlogPost() d.authors.append({"name": "Honza"}) with raises(ValidationException): d.full_clean() d = BlogPost() d.authors.append({"name": "Honza", "email": "honza@elastic.co"}) d.full_clean() def test_boolean_doesnt_treat_false_as_empty() -> None: d = BlogPostWithStatus() with raises(ValidationException): d.full_clean() d.published = False d.full_clean() d.published = True d.full_clean() def test_custom_validation_on_nested_gets_run() -> None: d = BlogPost(authors=[Author(name="Honza", email="king@example.com")], created=None) assert isinstance(d.authors[0], Author) # type: ignore[index] with raises(ValidationException): d.full_clean() def test_accessing_known_fields_returns_empty_value() -> None: d = BlogPost() assert [] == d.authors d.authors.append({}) assert None is d.authors[0].name # type: ignore[index] assert None is d.authors[0].email def test_empty_values_are_not_serialized() -> None: d = BlogPost(authors=[{"name": "Honza", "email": "honza@elastic.co"}], created=None) d.full_clean() assert d.to_dict() == {"authors": [{"name": "Honza", "email": "honza@elastic.co"}]} python-elasticsearch-9.1.1/test_elasticsearch/test_dsl/test_wrappers.py000066400000000000000000000057611506101734100266300ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from datetime import datetime, timedelta from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence if TYPE_CHECKING: from _operator import _SupportsComparison import pytest from elasticsearch.dsl import Range @pytest.mark.parametrize( "kwargs, item", [ ({}, 1), ({}, -1), ({"gte": -1}, -1), ({"lte": 4}, 4), ({"lte": 4, "gte": 2}, 4), ({"lte": 4, "gte": 2}, 2), ({"gt": datetime.now() - timedelta(seconds=10)}, datetime.now()), ], ) def test_range_contains( kwargs: Mapping[str, "_SupportsComparison"], item: "_SupportsComparison" ) -> None: assert item in Range(**kwargs) @pytest.mark.parametrize( "kwargs, item", [ ({"gt": -1}, -1), ({"lt": 4}, 4), ({"lt": 4}, 42), ({"lte": 4, "gte": 2}, 1), ({"lte": datetime.now() - timedelta(seconds=10)}, datetime.now()), ], ) def test_range_not_contains( kwargs: Mapping[str, "_SupportsComparison"], item: "_SupportsComparison" ) -> None: assert item not in Range(**kwargs) @pytest.mark.parametrize( "args,kwargs", [ (({},), {"lt": 42}), ((), {"not_lt": 42}), ((object(),), {}), ((), {"lt": 1, "lte": 1}), ((), {"gt": 1, "gte": 1}), ], ) def test_range_raises_value_error_on_wrong_params( args: Sequence[Any], kwargs: Mapping[str, "_SupportsComparison"] ) -> None: with pytest.raises(ValueError): Range(*args, **kwargs) @pytest.mark.parametrize( "range,lower,inclusive", [ (Range(gt=1), 1, False), (Range(gte=1), 1, True), (Range(), None, False), (Range(lt=42), None, False), ], ) def test_range_lower( range: Range["_SupportsComparison"], lower: Optional["_SupportsComparison"], inclusive: bool, ) -> None: assert (lower, inclusive) == range.lower @pytest.mark.parametrize( "range,upper,inclusive", [ (Range(lt=1), 1, False), (Range(lte=1), 1, True), (Range(), None, False), (Range(gt=42), None, False), ], ) def test_range_upper( range: Range["_SupportsComparison"], upper: Optional["_SupportsComparison"], inclusive: bool, ) -> None: assert (upper, inclusive) == range.upper python-elasticsearch-9.1.1/test_elasticsearch/test_esql.py000066400000000000000000000463261506101734100241120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
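# Illustrative sketch (not part of the upstream suite): the ESQL builder
# composes pipe commands fluently, and `render()` joins them with a newline
# plus "| ", as the tests below assert. Index and field names are invented.
def _example_esql_render() -> None:
    from elasticsearch.esql import ESQL

    q = ESQL.from_("demo-index").where("status == 200").limit(5)
    assert q.render() == "FROM demo-index\n| WHERE status == 200\n| LIMIT 5"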
from elasticsearch.dsl import E
from elasticsearch.esql import ESQL, and_, functions, not_, or_


def test_from():
    query = ESQL.from_("employees")
    assert query.render() == "FROM employees"

    query = ESQL.from_("<logs-{now/d}>")
    assert query.render() == "FROM <logs-{now/d}>"

    query = ESQL.from_("employees-00001", "other-employees-*")
    assert query.render() == "FROM employees-00001, other-employees-*"

    query = ESQL.from_("cluster_one:employees-00001", "cluster_two:other-employees-*")
    assert (
        query.render()
        == "FROM cluster_one:employees-00001, cluster_two:other-employees-*"
    )

    query = ESQL.from_("employees").metadata("_id")
    assert query.render() == "FROM employees METADATA _id"


def test_row():
    query = ESQL.row(a=1, b="two", c=None)
    assert query.render() == 'ROW a = 1, b = "two", c = null'

    query = ESQL.row(a=[2, 1])
    assert query.render() == "ROW a = [2, 1]"

    query = ESQL.row(a=functions.round(1.23, 0))
    assert query.render() == "ROW a = ROUND(1.23, 0)"


def test_show():
    query = ESQL.show("INFO")
    assert query.render() == "SHOW INFO"


def test_change_point():
    query = (
        ESQL.row(key=list(range(1, 26)))
        .mv_expand("key")
        .eval(value=functions.case(E("key") < 13, 0, 42))
        .change_point("value")
        .on("key")
        .where("type IS NOT NULL")
    )
    assert (
        query.render()
        == """ROW key = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
| MV_EXPAND key
| EVAL value = CASE(key < 13, 0, 42)
| CHANGE_POINT value ON key
| WHERE type IS NOT NULL"""
    )


def test_completion():
    query = (
        ESQL.row(question="What is Elasticsearch?")
        .completion("question")
        .with_("test_completion_model")
        .keep("question", "completion")
    )
    assert (
        query.render()
        == """ROW question = "What is Elasticsearch?"
| COMPLETION question WITH {"inference_id": "test_completion_model"}
| KEEP question, completion"""
    )

    query = (
        ESQL.row(question="What is Elasticsearch?")
        .completion(answer=E("question"))
        .with_("test_completion_model")
        .keep("question", "answer")
    )
    assert (
        query.render()
        == """ROW question = "What is Elasticsearch?"
| COMPLETION answer = question WITH {"inference_id": "test_completion_model"} | KEEP question, answer""" ) query = ( ESQL.from_("movies") .sort("rating DESC") .limit(10) .eval( prompt="""CONCAT( "Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n", )""" ) .completion(summary="prompt") .with_("test_completion_model") .keep("title", "summary", "rating") ) assert ( query.render() == """FROM movies | SORT rating DESC | LIMIT 10 | EVAL prompt = CONCAT( "Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n", ) | COMPLETION summary = prompt WITH {"inference_id": "test_completion_model"} | KEEP title, summary, rating""" ) query = ( ESQL.from_("movies") .sort("rating DESC") .limit(10) .eval( prompt=functions.concat( "Summarize this movie using the following information: \n", "Title: ", E("title"), "\n", "Synopsis: ", E("synopsis"), "\n", "Actors: ", functions.mv_concat(E("actors"), ", "), "\n", ) ) .completion(summary="prompt") .with_("test_completion_model") .keep("title", "summary", "rating") ) assert ( query.render() == """FROM movies | SORT rating DESC | LIMIT 10 | EVAL prompt = CONCAT("Summarize this movie using the following information: \\n", "Title: ", title, "\\n", "Synopsis: ", synopsis, "\\n", "Actors: ", MV_CONCAT(actors, ", "), "\\n") | COMPLETION summary = prompt WITH {"inference_id": "test_completion_model"} | KEEP title, summary, rating""" ) def test_dissect(): query = ( ESQL.row(a="2023-01-23T12:15:00.000Z - some text - 127.0.0.1") .dissect("a", "%{date} - %{msg} - %{ip}") .keep("date", "msg", "ip") ) assert ( query.render() == """ROW a = "2023-01-23T12:15:00.000Z - some text - 127.0.0.1" | DISSECT a "%{date} - %{msg} - %{ip}" | KEEP date, msg, ip""" ) def test_drop(): query = ESQL.from_("employees").drop("height") assert query.render() == "FROM employees\n| DROP height" query = ESQL.from_("employees").drop("height*") assert query.render() == "FROM employees\n| DROP height*" def test_enrich(): query = ESQL.row(language_code="1").enrich("languages_policy") assert ( query.render() == """ROW language_code = "1" | ENRICH languages_policy""" ) query = ESQL.row(language_code="1").enrich("languages_policy").on("a") assert ( query.render() == """ROW language_code = "1" | ENRICH languages_policy ON a""" ) query = ( ESQL.row(language_code="1") .enrich("languages_policy") .on("a") .with_(name="language_name") ) assert ( query.render() == """ROW language_code = "1" | ENRICH languages_policy ON a WITH name = language_name""" ) def test_eval(): query = ( ESQL.from_("employees") .sort("emp_no") .keep("first_name", "last_name", "height") .eval(height_feet=E("height") * 3.281, height_cm=E("height") * 100) ) assert ( query.render() == """FROM employees | SORT emp_no | KEEP first_name, last_name, height | EVAL height_feet = height * 3.281, height_cm = height * 100""" ) query = ( ESQL.from_("employees") .sort("emp_no") .keep("first_name", "last_name", "height") .eval(E("height") * 3.281) ) assert ( query.render() == """FROM employees | SORT emp_no | KEEP first_name, last_name, height | EVAL height * 3.281""" ) query = ( ESQL.from_("employees") .eval("height * 3.281") .stats(avg_height_feet=functions.avg(E("`height * 3.281`"))) ) assert ( query.render() == """FROM employees | EVAL height * 3.281 | STATS avg_height_feet = AVG(`height * 3.281`)""" ) def test_fork(): query = ( 
ESQL.from_("employees") .fork( ESQL.branch().where(E("emp_no") == 10001), ESQL.branch().where("emp_no == 10002"), ) .keep("emp_no", "_fork") .sort("emp_no") ) assert ( query.render() == """FROM employees | FORK ( WHERE emp_no == 10001 ) ( WHERE emp_no == 10002 ) | KEEP emp_no, _fork | SORT emp_no""" ) def test_grok(): query = ( ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") .grok( "a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}", ) .keep("date", "ip", "email", "num") ) assert ( query.render() == """ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" | GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num}" | KEEP date, ip, email, num""" ) query = ( ESQL.row(a="2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42") .grok( "a", "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}", ) .keep("date", "ip", "email", "num") .eval(date=functions.to_datetime(E("date"))) ) assert ( query.render() == """ROW a = "2023-01-23T12:15:00.000Z 127.0.0.1 some.email@foo.com 42" | GROK a "%{TIMESTAMP_ISO8601:date} %{IP:ip} %{EMAILADDRESS:email} %{NUMBER:num:int}" | KEEP date, ip, email, num | EVAL date = TO_DATETIME(date)""" ) query = ( ESQL.from_("addresses") .keep("city.name", "zip_code") .grok("zip_code", "%{WORD:zip_parts} %{WORD:zip_parts}") ) assert ( query.render() == """FROM addresses | KEEP city.name, zip_code | GROK zip_code "%{WORD:zip_parts} %{WORD:zip_parts}\"""" ) def test_keep(): query = ESQL.from_("employees").keep("emp_no", "first_name", "last_name", "height") assert ( query.render() == "FROM employees\n| KEEP emp_no, first_name, last_name, height" ) query = ESQL.from_("employees").keep("h*") assert query.render() == "FROM employees\n| KEEP h*" query = ESQL.from_("employees").keep("*", "first_name") assert query.render() == "FROM employees\n| KEEP *, first_name" def test_limit(): query = ESQL.from_("index").where(E("field") == "value").limit(1000) assert query.render() == 'FROM index\n| WHERE field == "value"\n| LIMIT 1000' query = ( ESQL.from_("index").stats(functions.avg(E("field1"))).by("field2").limit(20000) ) assert ( query.render() == "FROM index\n| STATS AVG(field1)\n BY field2\n| LIMIT 20000" ) def test_lookup_join(): query = ( ESQL.from_("firewall_logs") .lookup_join("threat_list") .on("source.IP") .where("threat_level IS NOT NULL") ) assert ( query.render() == """FROM firewall_logs | LOOKUP JOIN threat_list ON source.IP | WHERE threat_level IS NOT NULL""" ) query = ( ESQL.from_("system_metrics") .lookup_join("host_inventory") .on("host.name") .lookup_join("ownerships") .on("host.name") ) assert ( query.render() == """FROM system_metrics | LOOKUP JOIN host_inventory ON host.name | LOOKUP JOIN ownerships ON host.name""" ) query = ESQL.from_("app_logs").lookup_join("service_owners").on("service_id") assert ( query.render() == """FROM app_logs | LOOKUP JOIN service_owners ON service_id""" ) query = ( ESQL.from_("employees") .eval(language_code="languages") .where(E("emp_no") >= 10091, E("emp_no") < 10094) .lookup_join("languages_lookup") .on("language_code") ) assert ( query.render() == """FROM employees | EVAL language_code = languages | WHERE emp_no >= 10091 AND emp_no < 10094 | LOOKUP JOIN languages_lookup ON language_code""" ) def test_mv_expand(): query = ESQL.row(a=[1, 2, 3], b="b", j=["a", "b"]).mv_expand("a") assert ( query.render() == """ROW a = [1, 2, 3], b = "b", j = ["a", "b"] | MV_EXPAND a""" ) def test_rename(): query = ( ESQL.from_("employees") 
.keep("first_name", "last_name", "still_hired") .rename(still_hired="employed") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, still_hired | RENAME still_hired AS employed""" ) def test_sample(): query = ESQL.from_("employees").keep("emp_no").sample(0.05) assert ( query.render() == """FROM employees | KEEP emp_no | SAMPLE 0.05""" ) def test_sort(): query = ( ESQL.from_("employees").keep("first_name", "last_name", "height").sort("height") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, height | SORT height""" ) query = ( ESQL.from_("employees") .keep("first_name", "last_name", "height") .sort("height DESC") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, height | SORT height DESC""" ) query = ( ESQL.from_("employees") .keep("first_name", "last_name", "height") .sort("height DESC", "first_name ASC") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, height | SORT height DESC, first_name ASC""" ) query = ( ESQL.from_("employees") .keep("first_name", "last_name", "height") .sort("first_name ASC NULLS FIRST") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, height | SORT first_name ASC NULLS FIRST""" ) def test_stats(): query = ( ESQL.from_("employees") .stats(count=functions.count(E("emp_no"))) .by("languages") .sort("languages") ) assert ( query.render() == """FROM employees | STATS count = COUNT(emp_no) BY languages | SORT languages""" ) query = ESQL.from_("employees").stats(avg_lang=functions.avg(E("languages"))) assert ( query.render() == """FROM employees | STATS avg_lang = AVG(languages)""" ) query = ESQL.from_("employees").stats( avg_lang=functions.avg(E("languages")), max_lang=functions.max(E("languages")) ) assert ( query.render() == """FROM employees | STATS avg_lang = AVG(languages), max_lang = MAX(languages)""" ) query = ( ESQL.from_("employees") .stats( avg50s=functions.avg(E("salary")).where('birth_date < "1960-01-01"'), avg60s=functions.avg(E("salary")).where('birth_date >= "1960-01-01"'), ) .by("gender") .sort("gender") ) assert ( query.render() == """FROM employees | STATS avg50s = AVG(salary) WHERE birth_date < "1960-01-01", avg60s = AVG(salary) WHERE birth_date >= "1960-01-01" BY gender | SORT gender""" ) query = ( ESQL.from_("employees") .eval(Ks="salary / 1000") .stats( under_40K=functions.count(E("*")).where("Ks < 40"), inbetween=functions.count(E("*")).where("40 <= Ks", "Ks < 60"), over_60K=functions.count(E("*")).where("60 <= Ks"), total=functions.count(E("*")), ) ) assert ( query.render() == """FROM employees | EVAL Ks = salary / 1000 | STATS under_40K = COUNT(*) WHERE Ks < 40, inbetween = COUNT(*) WHERE (40 <= Ks) AND (Ks < 60), over_60K = COUNT(*) WHERE 60 <= Ks, total = COUNT(*)""" ) query = ( ESQL.row(i=1, a=["a", "b"]).stats(functions.min(E("i"))).by("a").sort("a ASC") ) assert ( query.render() == 'ROW i = 1, a = ["a", "b"]\n| STATS MIN(i)\n BY a\n| SORT a ASC' ) query = ( ESQL.from_("employees") .eval(hired=functions.date_format(E("hire_date"), "yyyy")) .stats(avg_salary=functions.avg(E("salary"))) .by("hired", "languages.long") .eval(avg_salary=functions.round(E("avg_salary"))) .sort("hired", "languages.long") ) assert ( query.render() == """FROM employees | EVAL hired = DATE_FORMAT("yyyy", hire_date) | STATS avg_salary = AVG(salary) BY hired, languages.long | EVAL avg_salary = ROUND(avg_salary) | SORT hired, languages.long""" ) def test_where(): query = ( ESQL.from_("employees") .keep("first_name", "last_name", 
"still_hired") .where("still_hired == true") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, still_hired | WHERE still_hired == true""" ) query = ESQL.from_("sample_data").where("@timestamp > NOW() - 1 hour") assert ( query.render() == """FROM sample_data | WHERE @timestamp > NOW() - 1 hour""" ) query = ( ESQL.from_("employees") .keep("first_name", "last_name", "height") .where("LENGTH(first_name) < 4") ) assert ( query.render() == """FROM employees | KEEP first_name, last_name, height | WHERE LENGTH(first_name) < 4""" ) def test_and_operator(): query = ESQL.from_("index").where( and_(E("age") > 30, E("age") < 40, E("name").is_not_null()) ) assert ( query.render() == """FROM index | WHERE (age > 30) AND (age < 40) AND (name IS NOT NULL)""" ) def test_or_operator(): query = ESQL.from_("index").where( or_(E("age") < 30, E("age") > 40, E("name").is_null()) ) assert ( query.render() == """FROM index | WHERE (age < 30) OR (age > 40) OR (name IS NULL)""" ) def test_not_operator(): query = ESQL.from_("index").where(not_(E("age") > 40)) assert ( query.render() == """FROM index | WHERE NOT (age > 40)""" ) def test_in_operator(): query = ESQL.row(a=1, b=4, c=3).where((E("c") - E("a")).in_(3, E("b") / 2, "a")) assert ( query.render() == """ROW a = 1, b = 4, c = 3 | WHERE c - a IN (3, b / 2, a)""" ) def test_like_operator(): query = ( ESQL.from_("employees") .where(E("first_name").like("?b*")) .keep("first_name", "last_name") ) assert ( query.render() == """FROM employees | WHERE first_name LIKE "?b*" | KEEP first_name, last_name""" ) query = ESQL.row(message="foo * bar").where(E("message").like("foo \\* bar")) assert ( query.render() == """ROW message = "foo * bar" | WHERE message LIKE "foo \\\\* bar\"""" ) query = ESQL.row(message="foobar").where(E("message").like("foo*", "bar?")) assert ( query.render() == """ROW message = "foobar" | WHERE message LIKE ("foo*", "bar?")""" ) def test_rlike_operator(): query = ( ESQL.from_("employees") .where(E("first_name").rlike(".leja*")) .keep("first_name", "last_name") ) assert ( query.render() == """FROM employees | WHERE first_name RLIKE ".leja*" | KEEP first_name, last_name""" ) query = ESQL.row(message="foo ( bar").where(E("message").rlike("foo \\( bar")) assert ( query.render() == """ROW message = "foo ( bar" | WHERE message RLIKE "foo \\\\( bar\"""" ) query = ESQL.row(message="foobar").where(E("message").rlike("foo.*", "bar.")) assert ( query.render() == """ROW message = "foobar" | WHERE message RLIKE ("foo.*", "bar.")""" ) def test_match_operator(): query = ESQL.from_("books").where(E("author").match("Faulkner")) assert ( query.render() == """FROM books | WHERE author:"Faulkner\"""" ) def test_parameters(): query = ESQL.from_("employees").where("name == ?") assert query.render() == "FROM employees\n| WHERE name == ?" query = ESQL.from_("employees").where(E("name") == E("?")) assert query.render() == "FROM employees\n| WHERE name == ?" python-elasticsearch-9.1.1/test_elasticsearch/test_exceptions.py000066400000000000000000000056621506101734100253250ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from elastic_transport import ApiResponseMeta from elasticsearch.exceptions import ApiError error_meta = ApiResponseMeta( status=500, http_version="1.1", headers={}, duration=0.0, node=None ) class TestTransformError: def test_transform_error_parse_with_error_reason(self): e = ApiError( message="InternalServerError", meta=error_meta, body={ "error": {"root_cause": [{"type": "error", "reason": "error reason"}]} }, ) assert str(e) == "ApiError(500, 'InternalServerError', 'error reason')" def test_transform_error_parse_with_error_string(self): e = ApiError( message="InternalServerError", meta=error_meta, body={"error": "something error message"}, ) assert ( str(e) == "ApiError(500, 'InternalServerError', 'something error message')" ) def test_transform_invalid_media_type_error(self): e = ApiError( message="InvalidMediaType", meta=error_meta, body={ "error": { "root_cause": [ { "type": "media_type_header_exception", "reason": "Invalid media-type value on headers [Accept, Content-Type]", } ], "type": "media_type_header_exception", "reason": "Invalid media-type value on headers [Accept, Content-Type]", "caused_by": { "type": "status_exception", "reason": "Accept version must be either version 8 or 7, but found 9. Accept=application/vnd.elasticsearch+json; compatible-with=9", }, }, "status": 400, }, ) assert str(e) == ( "ApiError(500, 'InvalidMediaType', " "'Invalid media-type value on headers [Accept, Content-Type]', " "Accept version must be either version 8 or 7, but found 9. " "Accept=application/vnd.elasticsearch+json; compatible-with=9)" ) python-elasticsearch-9.1.1/test_elasticsearch/test_helpers.py000066400000000000000000000156521506101734100246060ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
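# Illustrative sketch (not part of the upstream suite): expand_action() is
# the normalization step exercised below -- it splits a single action dict
# into an (action_line, source) pair for the bulk body. The index and field
# names in this example are invented.
def _example_expand_action() -> None:
    from elasticsearch import helpers

    meta, source = helpers.expand_action({"_index": "demo", "_id": "1", "field": "x"})
    assert meta == {"index": {"_index": "demo", "_id": "1"}}
    assert source == {"field": "x"}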
import pickle import threading import time from unittest import mock import pytest from elasticsearch import Elasticsearch, helpers from elasticsearch.serializer import JSONSerializer lock_side_effect = threading.Lock() def mock_process_bulk_chunk(*args, **kwargs): """ Threadsafe way of mocking process bulk chunk: https://stackoverflow.com/questions/39332139/thread-safe-version-of-mock-call-count """ with lock_side_effect: mock_process_bulk_chunk.call_count += 1 time.sleep(0.1) return [] mock_process_bulk_chunk.call_count = 0 class TestParallelBulk: @mock.patch( "elasticsearch.helpers.actions._process_bulk_chunk", side_effect=mock_process_bulk_chunk, ) def test_all_chunks_sent(self, _process_bulk_chunk): actions = ({"x": i} for i in range(100)) list( helpers.parallel_bulk( Elasticsearch("http://localhost:9200"), actions, chunk_size=2 ) ) assert 50 == mock_process_bulk_chunk.call_count @mock.patch( "elasticsearch.helpers.actions._process_bulk_chunk", # make sure we spend some time in the thread side_effect=lambda *_, **__: [ (True, time.sleep(0.001) or threading.current_thread().ident) ], ) def test_chunk_sent_from_different_threads(self, _process_bulk_chunk): actions = ({"x": i} for i in range(100)) results = list( helpers.parallel_bulk( Elasticsearch("http://localhost:9200"), actions, thread_count=10, chunk_size=2, ) ) assert len({r[1] for r in results}) > 1 class TestChunkActions: def setup_method(self, _): self.actions = [({"index": {}}, {"some": "datá", "i": i}) for i in range(100)] def test_expand_action(self): assert helpers.expand_action({}) == ({"index": {}}, {}) assert helpers.expand_action({"key": "val"}) == ({"index": {}}, {"key": "val"}) def test_expand_action_actions(self): assert helpers.expand_action( {"_op_type": "delete", "_id": "id", "_index": "index"} ) == ({"delete": {"_id": "id", "_index": "index"}}, None) assert helpers.expand_action( {"_op_type": "update", "_id": "id", "_index": "index", "key": "val"} ) == ({"update": {"_id": "id", "_index": "index"}}, {"key": "val"}) assert helpers.expand_action( {"_op_type": "create", "_id": "id", "_index": "index", "key": "val"} ) == ({"create": {"_id": "id", "_index": "index"}}, {"key": "val"}) assert helpers.expand_action( { "_op_type": "create", "_id": "id", "_index": "index", "_source": {"key": "val"}, } ) == ({"create": {"_id": "id", "_index": "index"}}, {"key": "val"}) def test_expand_action_options(self): for option in ( "_id", "_index", "_percolate", "_timestamp", "_type", "if_seq_no", "if_primary_term", "parent", "pipeline", "retry_on_conflict", "routing", "version", "version_type", ("_parent", "parent"), ("_retry_on_conflict", "retry_on_conflict"), ("_routing", "routing"), ("_version", "version"), ("_version_type", "version_type"), ("_if_seq_no", "if_seq_no"), ("_if_primary_term", "if_primary_term"), ): if isinstance(option, str): action_option = option else: option, action_option = option assert helpers.expand_action({"key": "val", option: 0}) == ( {"index": {action_option: 0}}, {"key": "val"}, ) def test__source_metadata_or_source(self): assert helpers.expand_action({"_source": {"key": "val"}}) == ( {"index": {}}, {"key": "val"}, ) assert helpers.expand_action( {"_source": ["key"], "key": "val", "_op_type": "update"} ) == ({"update": {"_source": ["key"]}}, {"key": "val"}) assert helpers.expand_action( {"_source": True, "key": "val", "_op_type": "update"} ) == ({"update": {"_source": True}}, {"key": "val"}) # This case is only to ensure backwards compatibility with old functionality. 
assert helpers.expand_action( {"_source": {"key2": "val2"}, "key": "val", "_op_type": "update"} ) == ({"update": {}}, {"key2": "val2"}) def test_chunks_are_chopped_by_byte_size(self): assert 100 == len( list(helpers._chunk_actions(self.actions, 100000, 1, JSONSerializer())) ) def test_chunks_are_chopped_by_chunk_size(self): assert 10 == len( list(helpers._chunk_actions(self.actions, 10, 99999999, JSONSerializer())) ) def test_chunks_are_chopped_by_byte_size_properly(self): max_byte_size = 170 chunks = list( helpers._chunk_actions( self.actions, 100000, max_byte_size, JSONSerializer() ) ) assert 25 == len(chunks) for chunk_data, chunk_actions in chunks: chunk = b"".join(chunk_actions) assert len(chunk) <= max_byte_size class TestExpandActions: @pytest.mark.parametrize("action", ["whatever", b"whatever"]) def test_string_actions_are_marked_as_simple_inserts(self, action): assert ({"index": {}}, b"whatever") == helpers.expand_action(action) def test_serialize_bulk_index_error(): error = helpers.BulkIndexError("message", [{"error": 1}]) pickled = pickle.loads(pickle.dumps(error)) assert pickled.__class__ == helpers.BulkIndexError assert pickled.errors == error.errors assert pickled.args == error.args def test_serialize_scan_error(): error = helpers.ScanError("scroll_id", "shard_message") pickled = pickle.loads(pickle.dumps(error)) assert pickled.__class__ == helpers.ScanError assert pickled.scroll_id == error.scroll_id assert pickled.args == error.args python-elasticsearch-9.1.1/test_elasticsearch/test_otel.py000066400000000000000000000077001506101734100241020ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
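# Illustrative sketch (not part of the upstream suite): the in-memory span
# exporter pattern used by setup_tracing() below lets tests assert on
# finished spans without running a collector. Requires the optional
# opentelemetry-sdk dependency; the span name "demo" is invented.
def _example_in_memory_tracing() -> None:
    from opentelemetry.sdk.trace import TracerProvider, export
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
        InMemorySpanExporter,
    )

    provider = TracerProvider()
    exporter = InMemorySpanExporter()
    provider.add_span_processor(export.SimpleSpanProcessor(exporter))
    tracer = provider.get_tracer("example")
    with tracer.start_as_current_span("demo"):
        pass  # spans finish on context exit and land in the exporter
    assert [s.name for s in exporter.get_finished_spans()] == ["demo"]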
import os from unittest import mock import pytest from elasticsearch import Elasticsearch, helpers try: from opentelemetry.sdk.trace import TracerProvider, export from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( InMemorySpanExporter, ) except ModuleNotFoundError: pass from elasticsearch._otel import ENABLED_ENV_VAR, OpenTelemetry pytestmark = [ pytest.mark.skipif( "TEST_WITH_OTEL" not in os.environ, reason="TEST_WITH_OTEL is not set" ), pytest.mark.otel, ] def setup_tracing(): tracer_provider = TracerProvider() memory_exporter = InMemorySpanExporter() span_processor = export.SimpleSpanProcessor(memory_exporter) tracer_provider.add_span_processor(span_processor) tracer = tracer_provider.get_tracer(__name__) return tracer, memory_exporter def test_enabled(): otel = OpenTelemetry() assert otel.enabled == (os.environ.get(ENABLED_ENV_VAR, "true") == "true") def test_minimal_span(): tracer, memory_exporter = setup_tracing() otel = OpenTelemetry(enabled=True, tracer=tracer) with otel.span("GET", endpoint_id=None, path_parts={}): pass spans = memory_exporter.get_finished_spans() assert len(spans) == 1 assert spans[0].name == "GET" assert spans[0].attributes == { "http.request.method": "GET", "db.system.name": "elasticsearch", } def test_detailed_span(): tracer, memory_exporter = setup_tracing() otel = OpenTelemetry(enabled=True, tracer=tracer) with otel.span( "GET", endpoint_id="ml.open_job", path_parts={"job_id": "my-job"}, ) as span: span.set_elastic_cloud_metadata( { "X-Found-Handling-Cluster": "e9106fc68e3044f0b1475b04bf4ffd5f", "X-Found-Handling-Instance": "instance-0000000001", } ) spans = memory_exporter.get_finished_spans() assert len(spans) == 1 assert spans[0].name == "ml.open_job" assert spans[0].attributes == { "http.request.method": "GET", "db.system.name": "elasticsearch", "db.operation.name": "ml.open_job", "db.operation.parameter.job_id": "my-job", "db.namespace": "e9106fc68e3044f0b1475b04bf4ffd5f", "elasticsearch.node.name": "instance-0000000001", } @mock.patch("elasticsearch._otel.OpenTelemetry.use_span") @mock.patch("elasticsearch._otel.OpenTelemetry.helpers_span") @mock.patch("elasticsearch.helpers.actions._process_bulk_chunk_success") @mock.patch("elasticsearch.Elasticsearch.bulk") def test_forward_otel_context_to_subthreads( _call_bulk_mock, _process_bulk_success_mock, _mock_otel_helpers_span, _mock_otel_use_span, ): tracer, memory_exporter = setup_tracing() es_client = Elasticsearch("http://localhost:9200") es_client._otel = OpenTelemetry(enabled=True, tracer=tracer) _call_bulk_mock.return_value = mock.Mock() actions = ({"x": i} for i in range(100)) list(helpers.parallel_bulk(es_client, actions, chunk_size=4)) # Ensures that the OTEL context has been forwarded to all chunks assert es_client._otel.helpers_span.call_count == 1 assert es_client._otel.use_span.call_count == 25 python-elasticsearch-9.1.1/test_elasticsearch/test_serializer.py000066400000000000000000000202041506101734100253020ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import uuid from datetime import datetime from decimal import Decimal import pytest try: import pyarrow as pa from elasticsearch.serializer import PyArrowSerializer except ImportError: pa = None try: import numpy as np import pandas as pd except ImportError: np = pd = None import re from elasticsearch import Elasticsearch from elasticsearch.exceptions import SerializationError from elasticsearch.serializer import JSONSerializer, OrjsonSerializer, TextSerializer requires_numpy_and_pandas = pytest.mark.skipif( np is None or pd is None, reason="Test requires numpy and pandas to be available" ) @pytest.fixture(params=[JSONSerializer, OrjsonSerializer]) def json_serializer(request: pytest.FixtureRequest): yield request.param() def test_datetime_serialization(json_serializer): assert b'{"d":"2010-10-01T02:30:00"}' == json_serializer.dumps( {"d": datetime(2010, 10, 1, 2, 30)} ) @requires_numpy_and_pandas def test_decimal_serialization(json_serializer): assert b'{"d":3.8}' == json_serializer.dumps({"d": Decimal("3.8")}) def test_uuid_serialization(json_serializer): assert b'{"d":"00000000-0000-0000-0000-000000000003"}' == json_serializer.dumps( {"d": uuid.UUID("00000000-0000-0000-0000-000000000003")} ) @requires_numpy_and_pandas def test_serializes_numpy_bool(json_serializer): assert b'{"d":true}' == json_serializer.dumps({"d": np.bool_(True)}) @requires_numpy_and_pandas def test_serializes_numpy_integers(json_serializer): for np_type in ( np.int_, np.int8, np.int16, np.int32, np.int64, ): assert json_serializer.dumps({"d": np_type(-1)}) == b'{"d":-1}' for np_type in ( np.uint8, np.uint16, np.uint32, np.uint64, ): assert json_serializer.dumps({"d": np_type(1)}) == b'{"d":1}' @requires_numpy_and_pandas def test_serializes_numpy_floats(json_serializer): for np_type in ( np.float32, np.float64, ): assert re.search( rb'^{"d":1\.2[\d]*}$', json_serializer.dumps({"d": np_type(1.2)}) ) @requires_numpy_and_pandas def test_serializes_numpy_datetime(json_serializer): assert b'{"d":"2010-10-01T02:30:00"}' == json_serializer.dumps( {"d": np.datetime64("2010-10-01T02:30:00")} ) @requires_numpy_and_pandas def test_serializes_numpy_ndarray(json_serializer): assert b'{"d":[0,0,0,0,0]}' == json_serializer.dumps( {"d": np.zeros((5,), dtype=np.uint8)} ) # This isn't useful for Elasticsearch, just want to make sure it works. 
assert b'{"d":[[0,0],[0,0]]}' == json_serializer.dumps( {"d": np.zeros((2, 2), dtype=np.uint8)} ) @requires_numpy_and_pandas def test_serializes_numpy_nan_to_nan(): assert b'{"d":NaN}' == JSONSerializer().dumps({"d": float("NaN")}) # NaN is invalid JSON, and orjson silently converts it to null assert b'{"d":null}' == OrjsonSerializer().dumps({"d": float("NaN")}) @requires_numpy_and_pandas def test_serializes_pandas_timestamp(json_serializer): assert b'{"d":"2010-10-01T02:30:00"}' == json_serializer.dumps( {"d": pd.Timestamp("2010-10-01T02:30:00")} ) @requires_numpy_and_pandas def test_serializes_pandas_series(json_serializer): assert b'{"d":["a","b","c","d"]}' == json_serializer.dumps( {"d": pd.Series(["a", "b", "c", "d"])} ) @requires_numpy_and_pandas @pytest.mark.skipif(not hasattr(pd, "NA"), reason="pandas.NA is required") def test_serializes_pandas_na(json_serializer): assert b'{"d":null}' == json_serializer.dumps({"d": pd.NA}) @requires_numpy_and_pandas @pytest.mark.skipif(not hasattr(pd, "NaT"), reason="pandas.NaT required") def test_raises_serialization_error_pandas_nat(json_serializer): with pytest.raises(SerializationError): json_serializer.dumps({"d": pd.NaT}) @requires_numpy_and_pandas def test_serializes_pandas_category(json_serializer): cat = pd.Categorical(["a", "c", "b", "a"], categories=["a", "b", "c"]) assert b'{"d":["a","c","b","a"]}' == json_serializer.dumps({"d": cat}) cat = pd.Categorical([1, 2, 3], categories=[1, 2, 3]) assert b'{"d":[1,2,3]}' == json_serializer.dumps({"d": cat}) @pytest.mark.skipif(pa is None, reason="Test requires pyarrow to be available") def test_pyarrow_loads(): data = [ pa.array([1, 2, 3, 4]), pa.array(["foo", "bar", "baz", None]), pa.array([True, None, False, True]), ] batch = pa.record_batch(data, names=["f0", "f1", "f2"]) sink = pa.BufferOutputStream() with pa.ipc.new_stream(sink, batch.schema) as writer: writer.write_batch(batch) serializer = PyArrowSerializer() assert serializer.loads(sink.getvalue()).to_pydict() == { "f0": [1, 2, 3, 4], "f1": ["foo", "bar", "baz", None], "f2": [True, None, False, True], } def test_json_raises_serialization_error_on_dump_error(json_serializer): with pytest.raises(SerializationError): json_serializer.dumps(object()) def test_raises_serialization_error_on_load_error(json_serializer): with pytest.raises(SerializationError): json_serializer.loads(object()) with pytest.raises(SerializationError): json_serializer.loads("") with pytest.raises(SerializationError): json_serializer.loads("{{") def test_strings_are_left_untouched(): assert b"\xe4\xbd\xa0\xe5\xa5\xbd" == TextSerializer().dumps("你好") def test_text_raises_serialization_error_on_dump_error(): with pytest.raises(SerializationError): TextSerializer().dumps({}) class TestDeserializer: def setup_method(self, _): self.serializers = Elasticsearch("http://localhost:9200").transport.serializers def test_deserializes_json_by_default(self): assert {"some": "data"} == self.serializers.loads('{"some":"data"}') @pytest.mark.parametrize("data", ['{"some":"data"}', b'{"some":"data"}']) def test_deserializes_text_with_correct_ct(self, data): assert '{"some":"data"}' == self.serializers.loads(data, "text/plain") assert '{"some":"data"}' == self.serializers.loads( data, "text/plain; charset=whatever" ) def test_deserialize_compatibility_header(self): for content_type in ( "application/vnd.elasticsearch+json;compatible-with=8", "application/vnd.elasticsearch+json; compatible-with=8", "application/vnd.elasticsearch+json;compatible-with=9", 
"application/vnd.elasticsearch+json; compatible-with=9", ): assert {"some": "data"} == self.serializers.loads( '{"some":"data"}', content_type ) assert b'{"some":"data"}' == self.serializers.dumps( '{"some":"data"}', content_type ) for content_type in ( "application/vnd.elasticsearch+x-ndjson;compatible-with=8", "application/vnd.elasticsearch+x-ndjson; compatible-with=8", "application/vnd.elasticsearch+x-ndjson;compatible-with=9", "application/vnd.elasticsearch+x-ndjson; compatible-with=9", ): assert b'{"some":"data"}\n{"some":"data"}\n' == self.serializers.dumps( ['{"some":"data"}', {"some": "data"}], content_type ) assert [{"some": "data"}, {"some": "data"}] == self.serializers.loads( b'{"some":"data"}\n{"some":"data"}\n', content_type ) python-elasticsearch-9.1.1/test_elasticsearch/test_server/000077500000000000000000000000001506101734100240675ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_server/__init__.py000066400000000000000000000014231506101734100262000ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. python-elasticsearch-9.1.1/test_elasticsearch/test_server/conftest.py000066400000000000000000000046031506101734100262710ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import os import pytest import elasticsearch from ..utils import CA_CERTS, wipe_cluster # Information about the Elasticsearch instance running, if any # Used for ELASTICSEARCH_VERSION = "" ELASTICSEARCH_BUILD_HASH = "" ELASTICSEARCH_REST_API_TESTS = [] def _create(elasticsearch_url, transport=None, node_class=None): # Configure the client with certificates kw = {} if elasticsearch_url.startswith("https://"): kw["ca_certs"] = CA_CERTS # Optionally configure an HTTP conn class depending on # 'PYTHON_CONNECTION_CLASS' env var if "PYTHON_CONNECTION_CLASS" in os.environ: kw["node_class"] = os.environ["PYTHON_CONNECTION_CLASS"] if node_class is not None and "node_class" not in kw: kw["node_class"] = node_class if transport: kw["transport_class"] = transport # We do this little dance with the URL to force # Requests to respect 'headers: None' within rest API spec tests. return elasticsearch.Elasticsearch(elasticsearch_url, **kw) @pytest.fixture(scope="session") def sync_client_factory(elasticsearch_url): client = None try: client = _create(elasticsearch_url) # Wipe the cluster before we start testing just in case it wasn't wiped # cleanly from the previous run of pytest? wipe_cluster(client) yield client finally: if client: client.close() @pytest.fixture(scope="function") def sync_client(sync_client_factory): try: yield sync_client_factory finally: # Wipe the cluster clean after every test execution. wipe_cluster(sync_client_factory) python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_clients.py000066400000000000000000000042611506101734100271440ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
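# Illustrative sketch (not part of the upstream suite): the bulk API accepts
# a raw NDJSON body -- one action line, then one source line, each newline
# terminated -- which is what the string/bytes tests below rely on. The
# index name and document are invented.
def _example_ndjson_bulk_body() -> str:
    import json

    action = {"index": {"_index": "demo", "_id": "1"}}
    source = {"answer": 42}
    return json.dumps(action) + "\n" + json.dumps(source) + "\n"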
import pytest


@pytest.mark.parametrize("kwargs", [{"body": {"text": "привет"}}, {"text": "привет"}])
def test_indices_analyze_unicode(sync_client, kwargs):
    resp = sync_client.indices.analyze(**kwargs)
    assert resp == {
        "tokens": [
            {
                "end_offset": 6,
                "position": 0,
                "start_offset": 0,
                "token": "привет",
                "type": "<ALPHANUM>",
            }
        ]
    }


def test_bulk_works_with_string_body(sync_client):
    docs = '{ "index" : { "_index" : "bulk_test_index", "_id" : "1" } }\n{"answer": 42}'
    resp = sync_client.bulk(body=docs)

    assert resp["errors"] is False
    assert 1 == len(resp["items"])


def test_bulk_works_with_bytestring_body(sync_client):
    docs = (
        b'{ "index" : { "_index" : "bulk_test_index", "_id" : "2" } }\n{"answer": 42}\n'
    )
    resp = sync_client.bulk(body=docs)

    assert resp["errors"] is False
    assert 1 == len(resp["items"])

    # Pop inconsistent items before asserting
    resp["items"][0]["index"].pop("_id")
    resp["items"][0]["index"].pop("_version")
    assert resp["items"][0] == {
        "index": {
            "_index": "bulk_test_index",
            "result": "created",
            "_shards": {"total": 2, "successful": 1, "failed": 0},
            "_seq_no": 0,
            "_primary_term": 1,
            "status": 201,
        }
    }
python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_helpers.py000066400000000000000000001011371506101734100271450ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
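# Illustrative sketch (not part of the upstream suite): streaming_bulk()
# consumes an iterable of action dicts and yields one (ok, item) tuple per
# document, which is why the tests below unpack pairs. `client` is assumed
# to be a connected Elasticsearch instance; the action values are invented.
def _example_streaming_bulk(client) -> None:
    from elasticsearch import helpers

    actions = ({"_index": "demo", "_id": i, "value": i} for i in range(10))
    for ok, item in helpers.streaming_bulk(client, actions, raise_on_error=False):
        if not ok:
            print("failed:", item)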
import json from datetime import datetime, timedelta from unittest.mock import call, patch import pytest from dateutil import tz from elastic_transport import ApiResponseMeta, ObjectApiResponse from elasticsearch import ApiError, helpers from elasticsearch.helpers import ScanError class FailingBulkClient: def __init__( self, client, fail_at=(2,), fail_with=ApiError( message="Error!", body={}, meta=ApiResponseMeta( status=599, headers={}, http_version="1.1", duration=0, node=None ), ), ): self.client = client self._otel = client._otel self._called = 0 self._fail_at = fail_at self.transport = client.transport self._fail_with = fail_with def bulk(self, *args, **kwargs): self._called += 1 if self._called in self._fail_at: raise self._fail_with return self.client.bulk(*args, **kwargs) def options(self, **kwargs) -> "FailingBulkClient": return self def test_bulk_actions_remain_unchanged(sync_client): actions = [{"_id": 1}, {"_id": 2}] for ok, item in helpers.streaming_bulk(sync_client, actions, index="test-index"): assert ok assert [{"_id": 1}, {"_id": 2}] == actions def test_bulk_all_documents_get_inserted(sync_client): docs = [{"answer": x, "_id": x} for x in range(100)] for ok, item in helpers.streaming_bulk( sync_client, docs, index="test-index", refresh=True ): assert ok assert 100 == sync_client.count(index="test-index")["count"] assert {"answer": 42} == sync_client.get(index="test-index", id=42)["_source"] def test_bulk_all_errors_from_chunk_are_raised_on_failure(sync_client): sync_client.indices.create( index="i", body={ "mappings": {"properties": {"a": {"type": "integer"}}}, "settings": {"number_of_shards": 1, "number_of_replicas": 0}, }, ) try: for ok, _ in helpers.streaming_bulk( sync_client, [{"a": "b"}, {"a": "c"}], index="i", raise_on_error=True ): assert ok except helpers.BulkIndexError as e: assert 2 == len(e.errors) else: assert False, "exception should have been raised" def test_bulk_different_op_types(sync_client): sync_client.index(index="i", id=45, body={}) sync_client.index(index="i", id=42, body={}) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_op_type": "delete", "_index": "i", "_id": 45}, {"_op_type": "update", "_index": "i", "_id": 42, "doc": {"answer": 42}}, ] for ok, item in helpers.streaming_bulk(sync_client, docs): assert ok assert not sync_client.exists(index="i", id=45) assert {"answer": 42} == sync_client.get(index="i", id=42)["_source"] assert {"f": "v"} == sync_client.get(index="i", id=47)["_source"] def test_bulk_transport_error_can_becaught(sync_client): failing_client = FailingBulkClient(sync_client) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = list( helpers.streaming_bulk( failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, ) ) assert 3 == len(results) assert [True, False, True] == [r[0] for r in results] exc = results[1][1]["index"].pop("exception") assert isinstance(exc, ApiError) assert 599 == exc.status_code assert { "index": { "_index": "i", "_id": 45, "data": {"f": "v"}, "error": "ApiError(599, 'Error!')", "status": 599, } } == results[1][1] def test_bulk_rejected_documents_are_retried(sync_client): failing_client = FailingBulkClient( sync_client, fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = 
list( helpers.streaming_bulk( failing_client, docs, index="i", raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0, ) ) assert 3 == len(results) print(results) assert [True, True, True] == [r[0] for r in results] sync_client.indices.refresh(index="i") res = sync_client.search(index="i") assert {"value": 3, "relation": "eq"} == res["hits"]["total"] assert 4 == failing_client._called @pytest.mark.parametrize("use_bytes", [False, True]) def test_bulk_rejected_documents_are_retried_when_bytes_or_string( sync_client, use_bytes ): failing_client = FailingBulkClient( sync_client, fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [json.dumps({"field": x}, separators=(",", ":")) for x in range(3)] if use_bytes: docs = [doc.encode() for doc in docs] results = list( helpers.streaming_bulk( failing_client, docs, index="i", raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0, ) ) assert 3 == len(results) assert [True, True, True] == [r[0] for r in results] sync_client.indices.refresh(index="i") res = sync_client.search(index="i") assert {"value": 3, "relation": "eq"} == res["hits"]["total"] assert 4 == failing_client._called def test_bulk_rejected_documents_are_retried_at_most_max_retries_times(sync_client): failing_client = FailingBulkClient( sync_client, fail_at=(1, 2), fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = list( helpers.streaming_bulk( failing_client, docs, raise_on_exception=False, raise_on_error=False, chunk_size=1, max_retries=1, initial_backoff=0, ) ) assert 3 == len(results) assert [False, True, True] == [r[0] for r in results] sync_client.indices.refresh(index="i") res = sync_client.search(index="i") assert {"value": 2, "relation": "eq"} == res["hits"]["total"] assert 4 == failing_client._called def test_bulk_transport_error_is_raised_with_max_retries(sync_client): failing_client = FailingBulkClient( sync_client, fail_at=(1, 2, 3, 4), fail_with=ApiError( message="Rejected!", body={}, meta=ApiResponseMeta( status=429, headers={}, http_version="1.1", duration=0, node=None ), ), ) def streaming_bulk(): results = list( helpers.streaming_bulk( failing_client, [{"a": 42}, {"a": 39}], raise_on_exception=True, max_retries=3, initial_backoff=0, ) ) return results with pytest.raises(ApiError): streaming_bulk() assert 4 == failing_client._called def test_connection_timeout_is_retried_with_retry_status_callback(sync_client): failing_client = FailingBulkClient( sync_client, fail_with=ApiError( message="Connection timed out!", body={}, meta=ApiResponseMeta( status=522, headers={}, http_version="1.1", duration=0, node=None ), ), ) docs = [ {"_index": "i", "_id": 47, "f": "v"}, {"_index": "i", "_id": 45, "f": "v"}, {"_index": "i", "_id": 42, "f": "v"}, ] results = list( helpers.streaming_bulk( failing_client, docs, index="i", raise_on_exception=False, raise_on_error=False, chunk_size=1, retry_on_status=522, max_retries=1, initial_backoff=0, ) ) assert 3 == len(results) print(results) assert [True, True, True] == [r[0] for r in results] sync_client.indices.refresh(index="i") res = sync_client.search(index="i") assert {"value": 3, "relation": "eq"} == res["hits"]["total"] assert 4 == 
failing_client._called def test_bulk_works_with_single_item(sync_client): docs = [{"answer": 42, "_id": 1}] success, failed = helpers.bulk(sync_client, docs, index="test-index", refresh=True) assert 1 == success assert not failed assert 1 == sync_client.count(index="test-index")["count"] assert {"answer": 42} == sync_client.get(index="test-index", id=1)["_source"] def test_all_documents_get_inserted(sync_client): docs = [{"answer": x, "_id": x} for x in range(100)] success, failed = helpers.bulk(sync_client, docs, index="test-index", refresh=True) assert 100 == success assert not failed assert 100 == sync_client.count(index="test-index")["count"] assert {"answer": 42} == sync_client.get(index="test-index", id=42)["_source"] def test_stats_only_reports_numbers(sync_client): docs = [{"answer": x} for x in range(100)] success, failed = helpers.bulk( sync_client, docs, index="test-index", refresh=True, stats_only=True ) assert 100 == success assert 0 == failed assert 100 == sync_client.count(index="test-index")["count"] def test_errors_are_reported_correctly(sync_client): sync_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) success, failed = helpers.bulk( sync_client, [{"a": 42}, {"a": "c", "_id": 42}], index="i", raise_on_error=False, ) assert 1 == success assert 1 == len(failed) error = failed[0] assert "42" == error["index"]["_id"] assert "i" == error["index"]["_index"] print(error["index"]["error"]) assert error["index"]["error"]["type"] == "document_parsing_exception" def test_error_is_raised(sync_client): sync_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) with pytest.raises(helpers.BulkIndexError): helpers.bulk( sync_client, [{"a": 42}, {"a": "c"}], index="i", ) def test_ignore_error_if_raised(sync_client): # ignore the status code 400 in tuple helpers.bulk(sync_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=(400,)) # ignore the status code 400 in list helpers.bulk( sync_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=[ 400, ], ) # ignore the status code 400 helpers.bulk(sync_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=400) # ignore only the status code in the `ignore_status` argument with pytest.raises(helpers.BulkIndexError): helpers.bulk( sync_client, [{"a": 42}, {"a": "c"}], index="i", ignore_status=(444,), ) # ignore transport error exception failing_client = FailingBulkClient(sync_client) helpers.bulk(failing_client, [{"a": 42}], index="i", ignore_status=(599,)) def test_errors_are_collected_properly(sync_client): sync_client.indices.create( index="i", mappings={"properties": {"a": {"type": "integer"}}}, settings={"number_of_shards": 1, "number_of_replicas": 0}, ) success, failed = helpers.bulk( sync_client, [{"a": 42}, {"a": "c"}], index="i", stats_only=True, raise_on_error=False, ) assert 1 == success assert 1 == failed mock_scroll_responses = [ ObjectApiResponse( meta=None, raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": [{"scroll_data": 42}]}, }, ), ObjectApiResponse( meta=None, raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": []}, }, ), ] @pytest.fixture(scope="function") def scan_teardown(sync_client): yield sync_client.clear_scroll(scroll_id="_all") @pytest.mark.usefixtures("scan_teardown") def 
test_order_can_be_preserved(sync_client): bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index", "_id": x}}) bulk.append({"answer": x, "correct": x == 42}) sync_client.bulk(operations=bulk, refresh=True) docs = list( helpers.scan( sync_client, index="test_index", query={"sort": "answer"}, preserve_order=True, ) ) assert 100 == len(docs) assert list(map(str, range(100))) == list(d["_id"] for d in docs) assert list(range(100)) == list(d["_source"]["answer"] for d in docs) @pytest.mark.usefixtures("scan_teardown") def test_all_documents_are_read(sync_client): bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index", "_id": x}}) bulk.append({"answer": x, "correct": x == 42}) sync_client.bulk(operations=bulk, refresh=True) docs = list(helpers.scan(sync_client, index="test_index", size=2)) assert 100 == len(docs) assert set(map(str, range(100))) == {d["_id"] for d in docs} assert set(range(100)) == {d["_source"]["answer"] for d in docs} @pytest.mark.usefixtures("scan_teardown") def test_scroll_error(sync_client): bulk = [] for x in range(4): bulk.append({"index": {"_index": "test_index"}}) bulk.append({"value": x}) sync_client.bulk(operations=bulk, refresh=True) with ( patch.object(sync_client, "options", return_value=sync_client), patch.object(sync_client, "scroll") as scroll_mock, ): scroll_mock.side_effect = mock_scroll_responses data = list( helpers.scan( sync_client, index="test_index", size=2, raise_on_error=False, clear_scroll=False, ) ) assert len(data) == 3 assert data[-1] == {"scroll_data": 42} scroll_mock.side_effect = mock_scroll_responses with pytest.raises(ScanError): data = list( helpers.scan( sync_client, index="test_index", size=2, raise_on_error=True, clear_scroll=False, ) ) assert len(data) == 3 assert data[-1] == {"scroll_data": 42} def test_initial_search_error(sync_client): with ( patch.object( sync_client, "search", return_value=ObjectApiResponse( meta=None, raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 4, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, ), ), patch.object(sync_client, "options", return_value=sync_client), ): with ( patch.object(sync_client, "scroll") as scroll_mock, patch.object(sync_client, "clear_scroll") as clear_scroll_mock, ): scroll_mock.side_effect = mock_scroll_responses data = list( helpers.scan( sync_client, index="test_index", size=2, raise_on_error=False ) ) assert data == [{"search_data": 1}, {"scroll_data": 42}] # Scrolled at least once and received a scroll_id to clear. scroll_mock.assert_called_with( scroll_id="dummy_id", scroll="5m", ) clear_scroll_mock.assert_called_once_with( scroll_id="dummy_id", ) with ( patch.object(sync_client, "scroll") as scroll_mock, patch.object(sync_client, "clear_scroll") as clear_scroll_mock, ): scroll_mock.side_effect = mock_scroll_responses with pytest.raises(ScanError): data = list( helpers.scan( sync_client, index="test_index", size=2, raise_on_error=True ) ) assert data == [{"search_data": 1}] # Never scrolled but did receive a scroll_id to clear. 
scroll_mock.assert_not_called() clear_scroll_mock.assert_called_once_with( scroll_id="dummy_id", ) def test_no_scroll_id_fast_route(sync_client): with ( patch.object( sync_client, "search", return_value=ObjectApiResponse(meta=None, raw={"no": "_scroll_id"}), ) as search_mock, patch.object(sync_client, "scroll") as scroll_mock, patch.object(sync_client, "clear_scroll") as clear_scroll_mock, patch.object(sync_client, "options", return_value=sync_client) as options, ): data = list(helpers.scan(sync_client, index="test_index")) assert data == [] search_mock.assert_called_once_with( sort="_doc", scroll="5m", size=1000, index="test_index", ) options.assert_called_once_with(request_timeout=None) scroll_mock.assert_not_called() clear_scroll_mock.assert_not_called() @pytest.mark.parametrize( "kwargs", [ {"api_key": ("name", "value")}, {"http_auth": ("username", "password")}, {"basic_auth": ("username", "password")}, {"bearer_auth": "token"}, {"headers": {"custom", "header"}}, ], ) @pytest.mark.usefixtures("scan_teardown") def test_scan_auth_kwargs_forwarded(sync_client, kwargs): ((key, val),) = kwargs.items() with ( patch.object(sync_client, "options", return_value=sync_client) as options, patch.object( sync_client, "search", return_value=ObjectApiResponse( meta=None, raw={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, ), ), patch.object( sync_client, "scroll", return_value=ObjectApiResponse( meta=None, raw={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": []}, }, ), ), patch.object( sync_client, "clear_scroll", return_value=ObjectApiResponse(meta=None, raw={}), ), ): data = list(helpers.scan(sync_client, index="test_index", **kwargs)) assert data == [{"search_data": 1}] assert options.call_args_list == [ call( request_timeout=None, **{key if key != "http_auth" else "basic_auth": val} ), call(ignore_status=404), ] def test_scan_auth_kwargs_favor_scroll_kwargs_option(sync_client): with ( patch.object(sync_client, "options", return_value=sync_client) as options_mock, patch.object( sync_client, "search", return_value=ObjectApiResponse( raw={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": [{"search_data": 1}]}, }, meta=None, ), ) as search_mock, patch.object( sync_client, "scroll", return_value=ObjectApiResponse( raw={ "_scroll_id": "scroll_id", "_shards": {"successful": 5, "total": 5, "skipped": 0}, "hits": {"hits": []}, }, meta=None, ), ) as scroll_mock, patch.object( sync_client, "clear_scroll", return_value=ObjectApiResponse(raw={}, meta=None), ), ): data = list( helpers.scan( sync_client, index="test_index", scroll_kwargs={"headers": {"scroll": "kwargs"}, "sort": "asc"}, headers={"not scroll": "kwargs"}, ) ) assert data == [{"search_data": 1}] # Assert that we see 'scroll_kwargs' options used instead of 'kwargs' assert options_mock.call_args_list == [ call(request_timeout=None, headers={"not scroll": "kwargs"}), call(headers={"scroll": "kwargs"}), call(ignore_status=404), ] search_mock.assert_called_once_with( sort="_doc", index="test_index", scroll="5m", size=1000 ) scroll_mock.assert_called_once_with( scroll_id="scroll_id", scroll="5m", sort="asc" ) def test_log_warning_on_shard_failures(sync_client): bulk = [] for x in range(4): bulk.append({"index": {"_index": "test_index"}}) bulk.append({"value": x}) sync_client.bulk(operations=bulk, refresh=True) with ( patch("elasticsearch.helpers.actions.logger") as 
logger_mock, patch.object(sync_client, "options", return_value=sync_client), patch.object(sync_client, "scroll") as scroll_mock, ): scroll_mock.side_effect = mock_scroll_responses list( helpers.scan( sync_client, index="test_index", size=2, raise_on_error=False, clear_scroll=False, ) ) logger_mock.warning.assert_called() scroll_mock.side_effect = mock_scroll_responses try: list( helpers.scan( sync_client, index="test_index", size=2, raise_on_error=True, clear_scroll=False, ) ) except ScanError: pass logger_mock.warning.assert_called() def test_clear_scroll(sync_client): bulk = [] for x in range(4): bulk.append({"index": {"_index": "test_index"}}) bulk.append({"value": x}) sync_client.bulk(operations=bulk, refresh=True) with ( patch.object(sync_client, "options", return_value=sync_client), patch.object( sync_client, "clear_scroll", wraps=sync_client.clear_scroll ) as clear_scroll_mock, ): list(helpers.scan(sync_client, index="test_index", size=2)) clear_scroll_mock.assert_called_once() clear_scroll_mock.reset_mock() list(helpers.scan(sync_client, index="test_index", size=2, clear_scroll=True)) clear_scroll_mock.assert_called_once() clear_scroll_mock.reset_mock() list(helpers.scan(sync_client, index="test_index", size=2, clear_scroll=False)) clear_scroll_mock.assert_not_called() def test_shards_no_skipped_field(sync_client): # Test that scan doesn't fail if 'hits.skipped' isn't available. with ( patch.object(sync_client, "options", return_value=sync_client), patch.object( sync_client, "search", return_value=ObjectApiResponse( raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 5, "total": 5}, "hits": {"hits": [{"search_data": 1}]}, }, meta=None, ), ), patch.object(sync_client, "scroll") as scroll_mock, patch.object(sync_client, "clear_scroll"), ): scroll_mock.side_effect = [ ObjectApiResponse( raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 5, "total": 5}, "hits": {"hits": [{"scroll_data": 42}]}, }, meta=None, ), ObjectApiResponse( raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 5, "total": 5}, "hits": {"hits": []}, }, meta=None, ), ] data = list( helpers.scan(sync_client, index="test_index", size=2, raise_on_error=True) ) assert data == [{"search_data": 1}, {"scroll_data": 42}] @pytest.mark.parametrize( "scan_kwargs", [ {"from": 1}, {"from_": 1}, {"query": {"from": 1}}, {"query": {"from_": 1}}, {"query": {"query": {"match_all": {}}}, "from": 1}, {"query": {"query": {"match_all": {}}}, "from_": 1}, ], ) def test_scan_from_keyword_is_aliased(sync_client, scan_kwargs): with ( patch.object(sync_client, "options", return_value=sync_client), patch.object( sync_client, "search", return_value=ObjectApiResponse( raw={ "_scroll_id": "dummy_id", "_shards": {"successful": 5, "total": 5}, "hits": {"hits": []}, }, meta=None, ), ) as search_mock, patch.object(sync_client, "clear_scroll"), ): list(helpers.scan(sync_client, index="test_index", **scan_kwargs)) assert search_mock.call_args[1]["from_"] == 1 assert "from" not in search_mock.call_args[1] @pytest.fixture(scope="function") def reindex_setup(sync_client): bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index", "_id": x}}) bulk.append( { "answer": x, "correct": x == 42, "type": "answers" if x % 2 == 0 else "questions", } ) sync_client.bulk(operations=bulk, refresh=True) @pytest.mark.usefixtures("reindex_setup") def test_reindex_passes_kwargs_to_scan_and_bulk(sync_client): helpers.reindex( sync_client, "test_index", "prod_index", scan_kwargs={"q": "type:answers"}, bulk_kwargs={"refresh": True}, ) assert 
sync_client.indices.exists(index="prod_index") assert 50 == sync_client.count(index="prod_index", q="type:answers")["count"] assert {"answer": 42, "correct": True, "type": "answers"} == sync_client.get( index="prod_index", id=42 )["_source"] @pytest.mark.usefixtures("reindex_setup") def test_reindex_accepts_a_query(sync_client): helpers.reindex( sync_client, "test_index", "prod_index", query={"query": {"bool": {"filter": {"term": {"type": "answers"}}}}}, ) sync_client.indices.refresh() assert sync_client.indices.exists(index="prod_index") assert 50 == sync_client.count(index="prod_index", q="type:answers")["count"] assert {"answer": 42, "correct": True, "type": "answers"} == sync_client.get( index="prod_index", id=42 )["_source"] @pytest.mark.usefixtures("reindex_setup") def test_all_documents_get_moved(sync_client): helpers.reindex(sync_client, "test_index", "prod_index") sync_client.indices.refresh() assert sync_client.indices.exists(index="prod_index") assert 50 == sync_client.count(index="prod_index", q="type:questions")["count"] assert 50 == sync_client.count(index="prod_index", q="type:answers")["count"] assert {"answer": 42, "correct": True, "type": "answers"} == sync_client.get( index="prod_index", id=42 )["_source"] @pytest.fixture(scope="function") def parent_child_reindex_setup(sync_client): body = { "settings": {"number_of_shards": 1, "number_of_replicas": 0}, "mappings": { "properties": { "question_answer": { "type": "join", "relations": {"question": "answer"}, } } }, } sync_client.indices.create(index="test-index", body=body) sync_client.indices.create(index="real-index", body=body) sync_client.index(index="test-index", id=42, body={"question_answer": "question"}) sync_client.index( index="test-index", id=47, routing=42, body={"some": "data", "question_answer": {"name": "answer", "parent": 42}}, ) sync_client.indices.refresh(index="test-index") @pytest.mark.usefixtures("parent_child_reindex_setup") def test_children_are_reindexed_correctly(sync_client): helpers.reindex(sync_client, "test-index", "real-index") q = sync_client.get(index="real-index", id=42) assert { "_id": "42", "_index": "real-index", "_primary_term": 1, "_seq_no": 0, "_source": {"question_answer": "question"}, "_version": 1, "found": True, } == q q = sync_client.get(index="test-index", id=47, routing=42) assert { "_routing": "42", "_id": "47", "_index": "test-index", "_primary_term": 1, "_seq_no": 1, "_source": { "some": "data", "question_answer": {"name": "answer", "parent": 42}, }, "_version": 1, "found": True, } == q @pytest.fixture(scope="function") def reindex_data_stream_setup(sync_client): dt = datetime.now(tz=tz.UTC) bulk = [] for x in range(100): bulk.append({"index": {"_index": "test_index_stream", "_id": x}}) bulk.append( { "answer": x, "correct": x == 42, "type": "answers" if x % 2 == 0 else "questions", "@timestamp": (dt - timedelta(days=x)).isoformat(), } ) sync_client.bulk(operations=bulk, refresh=True) sync_client.indices.put_index_template( name="my-index-template", body={ "index_patterns": ["py-*-*"], "data_stream": {}, }, ) sync_client.indices.create_data_stream(name="py-test-stream") sync_client.indices.refresh() @pytest.mark.usefixtures("reindex_data_stream_setup") @pytest.mark.parametrize("op_type", [None, "create"]) def test_reindex_index_datastream(op_type, sync_client): helpers.reindex( sync_client, source_index="test_index_stream", target_index="py-test-stream", query={"query": {"bool": {"filter": {"term": {"type": "answers"}}}}}, op_type=op_type, ) sync_client.indices.refresh() 
assert sync_client.indices.exists(index="py-test-stream") assert 50 == sync_client.count(index="py-test-stream", q="type:answers")["count"] @pytest.mark.usefixtures("reindex_data_stream_setup") def test_reindex_index_datastream_op_type_index(sync_client): with pytest.raises( ValueError, match="Data streams must have 'op_type' set to 'create'" ): helpers.reindex( sync_client, source_index="test_index_stream", target_index="py-test-stream", query={"query": {"bool": {"filter": {"term": {"type": "answers"}}}}}, op_type="_index", ) python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_mapbox_vector_tile.py000066400000000000000000000106021506101734100313640ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch import RequestError @pytest.fixture(scope="function") def mvt_setup(sync_client): sync_client.indices.create( index="museums", body={ "mappings": { "properties": { "location": {"type": "geo_point"}, "name": {"type": "keyword"}, "price": {"type": "long"}, "included": {"type": "boolean"}, } } }, ) sync_client.bulk( index="museums", body=[ {"index": {"_id": "1"}}, { "location": "52.374081,4.912350", "name": "NEMO Science Museum", "price": 1750, "included": True, }, {"index": {"_id": "2"}}, { "location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis", "price": 1500, "included": False, }, {"index": {"_id": "3"}}, { "location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum", "price": 1650, "included": True, }, {"index": {"_id": "4"}}, { "location": "52.371667,4.914722", "name": "Amsterdam Centre for Architecture", "price": 0, "included": True, }, ], refresh=True, ) def test_mapbox_vector_tile_error(sync_client, mvt_setup): sync_client.search_mvt( index="museums", zoom=13, x=4207, y=2692, field="location", ) with pytest.raises(RequestError) as e: sync_client.search_mvt( index="museums", zoom=-100, x=4207, y=2692, field="location", ) assert str(e.value) == ( "BadRequestError(400, 'illegal_argument_exception', " "'Invalid geotile_grid precision of -100. Must be between 0 and 29.')" ) assert e.value.meta.status == 400 assert e.value.status_code == 400 assert e.value.body == { "error": { "root_cause": [ { "type": "illegal_argument_exception", "reason": "Invalid geotile_grid precision of -100. Must be between 0 and 29.", } ], "type": "illegal_argument_exception", "reason": "Invalid geotile_grid precision of -100. 
Must be between 0 and 29.", }, "status": 400, } def test_mapbox_vector_tile_response(sync_client, mvt_setup): try: import mapbox_vector_tile except ImportError: return pytest.skip("Requires the 'mapbox-vector-tile' package") resp = sync_client.search_mvt( index="museums", zoom=13, x=4207, y=2692, field="location", body={ "grid_precision": 2, "fields": ["name", "price"], "query": {"term": {"included": True}}, "aggs": { "min_price": {"min": {"field": "price"}}, "max_price": {"max": {"field": "price"}}, "avg_price": {"avg": {"field": "price"}}, }, }, ) assert resp.meta.status == 200 assert isinstance(resp.body, bytes) # Decode the binary as MVT tile = mapbox_vector_tile.decode(resp.body) # Assert some general things about the structure, mostly we want # to know that we got back a valid MVT. assert set(tile.keys()) == {"hits", "aggs", "meta"} python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_otel.py000066400000000000000000000075221506101734100264510ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import pytest import elasticsearch import elasticsearch.helpers from ..test_otel import setup_tracing pytestmark = [ pytest.mark.skipif( "TEST_WITH_OTEL" not in os.environ, reason="TEST_WITH_OTEL is not set" ), pytest.mark.otel, ] def test_otel_end_to_end(sync_client): tracer, memory_exporter = setup_tracing() sync_client._otel.tracer = tracer resp = sync_client.search(index="logs-*", query={"match_all": {}}) assert resp.meta.status == 200 spans = memory_exporter.get_finished_spans() assert len(spans) == 1 assert spans[0].name == "search" expected_attributes = { "http.request.method": "POST", "db.system.name": "elasticsearch", "db.operation.name": "search", "db.operation.parameter.index": "logs-*", } # Assert expected atttributes are here, but allow other attributes too # to make this test robust to elastic-transport changes assert expected_attributes.items() <= spans[0].attributes.items() @pytest.mark.parametrize( "bulk_helper_name", ["bulk", "streaming_bulk", "parallel_bulk"] ) def test_otel_bulk(sync_client, elasticsearch_url, bulk_helper_name): tracer, memory_exporter = setup_tracing() # Create a new client with our tracer sync_client = sync_client.options() sync_client._otel.tracer = tracer # "Disable" options to keep our custom tracer sync_client.options = lambda: sync_client docs = [{"answer": x, "helper": bulk_helper_name, "_id": x} for x in range(10)] bulk_function = getattr(elasticsearch.helpers, bulk_helper_name) if bulk_helper_name == "bulk": success, failed = bulk_function( sync_client, docs, index="test-index", chunk_size=2, refresh=True ) assert success, failed == (5, 0) else: for ok, resp in bulk_function( sync_client, docs, index="test-index", chunk_size=2, refresh=True ): assert ok is True memory_exporter.shutdown() assert 
10 == sync_client.count(index="test-index")["count"] assert {"answer": 4, "helper": bulk_helper_name} == sync_client.get( index="test-index", id=4 )["_source"] spans = list(memory_exporter.get_finished_spans()) parent_span = spans.pop() assert parent_span.name == f"helpers.{bulk_helper_name}" assert parent_span.attributes == { "db.system.name": "elasticsearch", "db.operation.name": f"helpers.{bulk_helper_name}", "http.request.method": "null", } assert len(spans) == 5 for span in spans: assert span.name == "bulk" expected_attributes = { "http.request.method": "PUT", "db.system.name": "elasticsearch", "db.operation.name": "bulk", "db.operation.parameter.index": "test-index", } # Assert expected atttributes are here, but allow other attributes too # to make this test robust to elastic-transport changes assert expected_attributes.items() <= spans[0].attributes.items() assert span.parent.trace_id == parent_span.context.trace_id python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_responses.py000066400000000000000000000027771506101734100275360ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. def test_text_response(sync_client): resp = sync_client.cat.tasks() assert resp.meta.status == 200 assert isinstance(resp.body, str) assert str(resp.body) == str(resp) def test_object_response(sync_client): resp = sync_client.search(size=1) assert isinstance(resp.body, dict) assert set(resp) == set(resp.body) assert resp.items() assert resp.keys() assert str(resp) == str(resp.body) assert resp["hits"] == resp.body["hits"] assert type(resp.copy()) is dict def test_exists_response(sync_client): resp = sync_client.indices.exists(index="no") assert resp.body is False assert not resp if resp: assert False, "Didn't evaluate to 'False'" assert str(resp) == "False" python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_rest_api_spec.py000066400000000000000000000522731506101734100303310ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
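# For orientation: the YAML specs consumed by the runner in this module are
# lists of steps such as ``do``, ``match``, ``set``, and ``skip``. A
# hypothetical (made-up) test illustrating the shape of the input:
#
#     - do:
#         index:
#           index: test-index
#           id: "1"
#           body: { "foo": "bar" }
#     - do:
#         get:
#           index: test-index
#           id: "1"
#     - match: { _source.foo: "bar" }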
""" Dynamically generated set of TestCases based on set of yaml files describing some integration tests. These files are shared among all official Elasticsearch clients. """ import io import json import os import re import warnings import zipfile from typing import Tuple, Union import pytest import urllib3 import yaml from elasticsearch import ApiError, ElasticsearchWarning, RequestError from elasticsearch._sync.client.utils import _base64_auth_header from elasticsearch.compat import string_types # some params had to be changed in python, keep track of them so we can rename # those in the tests accordingly PARAMS_RENAMES = {"from": "from_"} API_PARAMS_RENAMES = { "snapshot.create_repository": {"repository": "name"}, "snapshot.delete_repository": {"repository": "name"}, "snapshot.get_repository": {"repository": "name"}, "snapshot.cleanup_repository": {"repository": "name"}, "snapshot.verify_repository": {"repository": "name"}, "ilm.delete_lifecycle": {"policy", "name"}, "ilm.get_lifecycle": {"policy": "name"}, "ilm.put_lifecycle": {"policy": "name"}, } # mapping from catch values to http status codes CATCH_CODES = {"missing": 404, "conflict": 409, "unauthorized": 401} # test features we have implemented IMPLEMENTED_FEATURES = { "gtelte", "stash_in_path", "headers", "catch_unauthorized", "default_shards", "warnings", "allowed_warnings", "contains", "arbitrary_key", "transform_and_set", } # broken YAML tests on some releases FAILING_TESTS = { # ping has a custom implementation in Python and returns a boolean "ping/ping", # Not investigated yet "cat/aliases", "cat/fielddata", "cluster/delete_voting_config_exclusions", "cluster/voting_config_exclusions", "entsearch/10_basic", "indices/clone", "indices/data_stream_mappings[0]", "indices/resolve_cluster", "indices/settings", "indices/split", "indices/simulate_template_stack", "logstash/10_basic", "machine_learning/30_trained_model_stack", "machine_learning/jobs_crud", "scroll/10_basic", "security/10_api_key_basic", "security/130_user_profile", "transform/10_basic", } SKIPPED_TESTS = { # Timeouts # https://github.com/elastic/elasticsearch-serverless-python/issues/63 "cluster/cluster_info[0]", "inference/10_basic[0]", "machine_learning/20_trained_model[0]", } XPACK_FEATURES = None RUN_ASYNC_REST_API_TESTS = os.environ.get("PYTHON_CONNECTION_CLASS") == "requests" FALSEY_VALUES = ("", None, False, 0, 0.0) class YamlRunner: def __init__(self, client): self.client = client self.last_response = None self._run_code = None self._setup_code = None self._teardown_code = None self._state = {} def use_spec(self, test_spec): self._setup_code = test_spec.pop("setup", None) self._run_code = test_spec.pop("run", None) self._teardown_code = test_spec.pop("teardown", None) def setup(self): # Pull skips from individual tests to not do unnecessary setup. 
skip_code = [] for action in self._run_code: assert len(action) == 1 action_type, _ = list(action.items())[0] if action_type == "skip": skip_code.append(action) else: break if self._setup_code or skip_code: self.section("setup") if skip_code: self.run_code(skip_code) if self._setup_code: self.run_code(self._setup_code) def teardown(self): if self._teardown_code: self.section("teardown") self.run_code(self._teardown_code) def section(self, name): print(("=" * 10) + " " + name + " " + ("=" * 10)) def run(self): try: self.setup() self.section("test") self.run_code(self._run_code) finally: try: self.teardown() except Exception: pass def run_code(self, test): """Execute an instruction based on it's type.""" for action in test: assert len(action) == 1 action_type, action = list(action.items())[0] print(action_type, action) if hasattr(self, "run_" + action_type): getattr(self, "run_" + action_type)(action) else: raise RuntimeError(f"Invalid action type {action_type!r}") def run_do(self, action): api = self.client headers = action.pop("headers", None) catch = action.pop("catch", None) warn = action.pop("warnings", ()) allowed_warnings = action.pop("allowed_warnings", ()) assert len(action) == 1 # Remove the x_pack_rest_user authentication # if it's given via headers. We're already authenticated # via the 'elastic' user. if ( headers and headers.get("Authorization", None) == "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" ): headers.pop("Authorization") method, args = list(action.items())[0] if headers: args["headers"] = headers # locate api endpoint for m in method.split("."): if not hasattr(api, m): pytest.skip("This API isn't implemented yet") api = getattr(api, m) # Sometimes the 'body' parameter is encoded as a string instead of raw. if "body" in args: try: args["body"] = json.loads(args["body"]) except (TypeError, ValueError): pass if isinstance(args["body"], dict): # Detect when there are duplicate options that aren't the same value. # In this case the test isn't testing the client, it's testing Elasticsearch # and its ability to reject multiple values so we either combine # like values or skip the test entirely as unnecessary for the client. duplicate_args = set(args["body"]).intersection(args) if duplicate_args: for arg in list(duplicate_args): if args["body"][arg] == args[arg]: args["body"].pop(arg) else: pytest.skip( "Contains a duplicate parameter with a different value" ) # some parameters had to be renamed to not clash with python builtins, # compensate renames = PARAMS_RENAMES.copy() renames.update(API_PARAMS_RENAMES.get(method, {})) for k in renames: if k in args: args[renames[k]] = args.pop(k) # resolve vars for k in args: args[k] = self._resolve(args[k]) warnings.simplefilter("always", category=ElasticsearchWarning) with warnings.catch_warnings(record=True) as caught_warnings: try: self.last_response = api(**args).body except Exception as e: self._skip_intentional_type_errors(e) if not catch: raise self.run_catch(catch, e) else: if catch: raise AssertionError( f"Failed to catch {catch!r} in {self.last_response!r}." ) # Filter out warnings raised by other components. caught_warnings = [ str(w.message) for w in caught_warnings if w.category == ElasticsearchWarning and str(w.message) not in allowed_warnings ] # Sorting removes the issue with order raised. We only care about # if all warnings are raised in the single API call. 
if warn and sorted(warn) != sorted(caught_warnings): raise AssertionError( "Expected warnings not equal to actual warnings: expected=%r actual=%r" % (warn, caught_warnings) ) def run_catch(self, catch, exception): if catch == "param" or isinstance(exception, TypeError): assert isinstance(exception, TypeError) self.last_response = None return assert isinstance(exception, ApiError) if catch in CATCH_CODES: assert CATCH_CODES[catch] == exception.meta.status elif catch[0] == "/" and catch[-1] == "/": assert ( re.search(catch[1:-1], str(exception.body)), f"{catch} not in {str(exception.body)!r}", ) is not None self.last_response = exception.body def run_skip(self, skip): if "features" in skip: features = skip["features"] if not isinstance(features, (tuple, list)): features = [features] for feature in features: if feature in IMPLEMENTED_FEATURES: continue pytest.skip(f"feature '{feature}' is not supported") def run_gt(self, action): for key, value in action.items(): value = self._resolve(value) assert self._lookup(key) > value def run_gte(self, action): for key, value in action.items(): value = self._resolve(value) assert self._lookup(key) >= value def run_lt(self, action): for key, value in action.items(): value = self._resolve(value) assert self._lookup(key) < value def run_lte(self, action): for key, value in action.items(): value = self._resolve(value) assert self._lookup(key) <= value def run_set(self, action): for key, value in action.items(): value = self._resolve(value) self._state[value] = self._lookup(key) def run_is_false(self, action): try: value = self._lookup(action) except AssertionError: pass else: assert value in FALSEY_VALUES def run_is_true(self, action): value = self._lookup(action) assert value not in FALSEY_VALUES def run_length(self, action): for path, expected in action.items(): value = self._lookup(path) expected = self._resolve(expected) assert expected == len(value) def run_match(self, action): for path, expected in action.items(): value = self._lookup(path) expected = self._resolve(expected) if ( isinstance(expected, string_types) and expected.startswith("/") and expected.endswith("/") ): expected = re.compile(expected[1:-1], re.VERBOSE | re.MULTILINE) assert expected.search(value), f"{value!r} does not match {expected!r}" elif isinstance(value, list) and isinstance(expected, list): assert len(value) == len( expected ), f"Length between {value!r} and {expected!r} wasn't equal" [self._assert_match_equals(a, b) for a, b in zip(value, expected)] else: self._assert_match_equals(value, expected) def run_contains(self, action): for path, expected in action.items(): value = self._lookup(path) # list[dict[str,str]] is returned expected = self._resolve(expected) # dict[str, str] if expected not in value: raise AssertionError(f"{expected} is not contained by {value}") def run_transform_and_set(self, action): for key, value in action.items(): # Convert #base64EncodeCredentials(id,api_key) to ["id", "api_key"] if "#base64EncodeCredentials" in value: value = value.replace("#base64EncodeCredentials", "") value = value.replace("(", "").replace(")", "").split(",") self._state[key] = _base64_auth_header( (self._lookup(value[0]), self._lookup(value[1])) ) def _resolve(self, value): # resolve variables if isinstance(value, string_types) and "$" in value: for k, v in self._state.items(): for key_replace in ("${" + k + "}", "$" + k): if value == key_replace: value = v break # We only do the in-string replacement if using ${...} elif ( key_replace.startswith("${") and isinstance(value, 
string_types) and key_replace in value ): value = value.replace(key_replace, str(v)) break # We only do the in-string replacement if value is JSON string # E.g. '{\n "password_hash" : "$hash"\n}\n' elif ( key_replace.startswith("$") and isinstance(value, string_types) and key_replace in value and not value.startswith("$") ): value = value.replace(key_replace, str(v)) break if isinstance(value, string_types): value = value.strip() elif isinstance(value, dict): value = {k: self._resolve(v) for (k, v) in value.items()} elif isinstance(value, list): value = list(map(self._resolve, value)) return value def _lookup(self, path): # fetch the possibly nested value from last_response value = self.last_response if path == "$body": return value path = path.replace(r"\.", "\1") for step in path.split("."): if not step: continue # We check body again to handle E.g. '$body.$backing_index.data_stream' if step.startswith("$body"): continue step = step.replace("\1", ".") step = self._resolve(step) if ( isinstance(step, string_types) and step.isdigit() and isinstance(value, list) ): step = int(step) assert isinstance(value, list) assert len(value) > step elif step == "_arbitrary_key_": return list(value.keys())[0] else: assert step in value value = value[step] return value def _feature_enabled(self, name): global XPACK_FEATURES if XPACK_FEATURES is None: try: xinfo = self.client.xpack.info() XPACK_FEATURES = { f for f in xinfo["features"] if xinfo["features"][f]["enabled"] } IMPLEMENTED_FEATURES.add("xpack") except RequestError: XPACK_FEATURES = set() IMPLEMENTED_FEATURES.add("no_xpack") return name in XPACK_FEATURES def _assert_match_equals(self, a, b): # Handle for large floating points with 'E' if isinstance(b, string_types) and isinstance(a, float) and "e" in repr(a): a = repr(a).replace("e+", "E") assert a == b, f"{a!r} does not match {b!r}" def _skip_intentional_type_errors(self, e: Exception): if isinstance(e, TypeError) and ( "unexpected keyword argument" in str(e) or "required keyword-only argument" in str(e) ): pytest.skip("API intentionally used incorrectly in test") @pytest.fixture(scope="function") def sync_runner(sync_client_factory): # sync_client_factory does not wipe the cluster between tests return YamlRunner(sync_client_factory) # Source: https://stackoverflow.com/a/37958106/5763213 class NoDatesSafeLoader(yaml.SafeLoader): @classmethod def remove_implicit_resolver(cls, tag_to_remove): # Make a copy from the parent class to ensure # we're not modifying the parent. if "yaml_implicit_resolvers" not in cls.__dict__: cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy() # Remove the resolver everywhere it shows up. for key, mappings in cls.yaml_implicit_resolvers.items(): cls.yaml_implicit_resolvers[key] = [ (tag, regexp) for tag, regexp in mappings if tag != tag_to_remove ] # We don't want to magically load date/times into objects. Keep as strings. 
NoDatesSafeLoader.remove_implicit_resolver("tag:yaml.org,2002:timestamp") YAML_TEST_SPECS = [] # Try loading the REST API test specs from the Elastic Artifacts API try: # Construct the HTTP and Elasticsearch client http = urllib3.PoolManager(retries=urllib3.Retry(total=10)) yaml_tests_url = ( "https://api.github.com/repos/elastic/elasticsearch-clients-tests/zipball/main" ) # Download the zip and start reading YAML from the files in memory package_zip = zipfile.ZipFile(io.BytesIO(http.request("GET", yaml_tests_url).data)) for yaml_file in package_zip.namelist(): if not re.match(r"^.*\/tests\/.*\.ya?ml$", yaml_file): continue yaml_tests = list( yaml.load_all(package_zip.read(yaml_file), Loader=NoDatesSafeLoader) ) # Each file has a `requires` section with `serverless` and `stack` # boolean entries indicating whether the test should run with # serverless, stack or both. Additionally, each file may have a section # named 'setup' or 'teardown', these sets of steps should be run at the # beginning and end of every other test within the file so we do one # pass to capture those. requires = setup_steps = teardown_steps = None test_numbers_and_steps = [] test_number = 0 for yaml_test in yaml_tests: test_name, test_step = yaml_test.popitem() if test_name == "requires": requires = test_step elif test_name == "setup": setup_steps = test_step elif test_name == "teardown": teardown_steps = test_step else: test_numbers_and_steps.append((test_number, test_step)) test_number += 1 if not requires["stack"]: continue # Now we combine setup, teardown, and test_steps into # a set of pytest.param() instances for test_number, test_step in test_numbers_and_steps: # Build the id from the name of the YAML file and the number within # that file. Most important step is to remove most of the file path # prefixes and the .yml suffix. test_path = "/".join(yaml_file.split("/")[2:]) pytest_test_name = test_path.rpartition(".")[0].replace(".", "/") for prefix in ("rest-api-spec/", "test/", "free/", "platinum/"): if pytest_test_name.startswith(prefix): pytest_test_name = pytest_test_name[len(prefix) :] pytest_param_id = "%s[%d]" % (pytest_test_name, test_number) pytest_param = { "setup": setup_steps, "run": test_step, "teardown": teardown_steps, } # Skip either 'test_name' or 'test_name[x]' if pytest_test_name in FAILING_TESTS or pytest_param_id in FAILING_TESTS: pytest_param["fail"] = True elif pytest_test_name in SKIPPED_TESTS or pytest_param_id in SKIPPED_TESTS: pytest_param["skip"] = True YAML_TEST_SPECS.append(pytest.param(pytest_param, id=pytest_param_id)) except Exception as e: warnings.warn(f"Could not load REST API tests: {str(e)}") def _pytest_param_sort_key(param: pytest.param) -> Tuple[Union[str, int], ...]: # Sorts pytest parameters by their ID in a human-friendly way. return tuple(int(x) if x.isdigit() else x for x in re.split(r"([0-9]+)", param.id)) # Sort the tests by ID so they're grouped together nicely. 
YAML_TEST_SPECS = sorted(YAML_TEST_SPECS, key=_pytest_param_sort_key) if not RUN_ASYNC_REST_API_TESTS: @pytest.mark.parametrize("test_spec", YAML_TEST_SPECS) def test_rest_api_spec(test_spec, sync_runner): if test_spec.get("fail", False): pytest.xfail("Manually marked as failing in 'FAILING_TESTS'") elif test_spec.get("skip", False): pytest.skip("Manually marked as skipped") sync_runner.use_spec(test_spec) sync_runner.run() python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_vectorstore/000077500000000000000000000000001506101734100275055ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_vectorstore/__init__.py000066400000000000000000000060421506101734100316200ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import List from elastic_transport import Transport from elasticsearch.helpers.vectorstore import EmbeddingService class RequestSavingTransport(Transport): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self.requests: list = [] def perform_request(self, *args, **kwargs): self.requests.append(kwargs) return super().perform_request(*args, **kwargs) class FakeEmbeddings(EmbeddingService): """Fake embeddings functionality for testing.""" def __init__(self, dimensionality: int = 10) -> None: self.dimensionality = dimensionality def embed_documents(self, texts: List[str]) -> List[List[float]]: """Return simple embeddings. Embeddings encode each text as its index.""" return [ [float(1.0)] * (self.dimensionality - 1) + [float(i)] for i in range(len(texts)) ] def embed_query(self, text: str) -> List[float]: """Return constant query embeddings. Embeddings are identical to embed_documents(texts)[0]. Distance to each text will be that text's index, as it was passed to embed_documents. 
""" return [float(1.0)] * (self.dimensionality - 1) + [float(0.0)] class ConsistentFakeEmbeddings(FakeEmbeddings): """Fake embeddings which remember all the texts seen so far to return consistent vectors for the same texts.""" def __init__(self, dimensionality: int = 10) -> None: self.known_texts: List[str] = [] self.dimensionality = dimensionality def embed_documents(self, texts: List[str]) -> List[List[float]]: """Return consistent embeddings for each text seen so far.""" out_vectors = [] for text in texts: if text not in self.known_texts: self.known_texts.append(text) vector = [float(1.0)] * (self.dimensionality - 1) + [ float(self.known_texts.index(text) + 1) ] out_vectors.append(vector) return out_vectors def embed_query(self, text: str) -> List[float]: """Return consistent embeddings for the text, if seen before, or a constant one if the text is unknown.""" result = self.embed_documents([text]) return result[0] python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_vectorstore/conftest.py000066400000000000000000000035431506101734100317110ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import uuid import pytest from ...utils import wipe_cluster from ..conftest import _create from . import RequestSavingTransport @pytest.fixture(scope="function") def index() -> str: return f"test_{uuid.uuid4().hex}" @pytest.fixture(scope="function") def sync_client_request_saving_factory(elasticsearch_url): client = None try: client = _create(elasticsearch_url) # Wipe the cluster before we start testing just in case it wasn't wiped # cleanly from the previous run of pytest? wipe_cluster(client) finally: client.close() try: # Recreate client with a transport that saves requests. client = _create(elasticsearch_url, RequestSavingTransport) yield client finally: if client: client.close() @pytest.fixture(scope="function") def sync_client_request_saving(sync_client_request_saving_factory): try: yield sync_client_request_saving_factory finally: # Wipe the cluster clean after every test execution. wipe_cluster(sync_client_request_saving_factory) python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_vectorstore/docker-compose.yml000066400000000000000000000017221506101734100331440ustar00rootroot00000000000000version: "3" services: elasticsearch: image: elasticsearch:8.13.0 environment: - action.destructive_requires_name=false # allow wildcard index deletions - discovery.type=single-node - xpack.license.self_generated.type=trial - xpack.security.enabled=false # disable password and TLS; never do this in production! 
ports: - "9200:9200" healthcheck: test: [ "CMD-SHELL", "curl --silent --fail http://localhost:9200/_cluster/health || exit 1" ] interval: 10s retries: 60 # Currently fails on Mac: https://github.com/elastic/elasticsearch/issues/106206 elasticsearch-with-model: image: docker.elastic.co/eland/eland depends_on: - elasticsearch restart: no command: sh -c " sleep 10 && eland_import_hub_model \ --hub-model-id sentence-transformers/all-minilm-l6-v2 \ --url http://elasticsearch:9200 \ --start " python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_vectorstore/test_embedding_service.py000066400000000000000000000065611506101734100345640ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import re import pytest from elasticsearch import Elasticsearch from elasticsearch.helpers.vectorstore import ElasticsearchEmbeddings from elasticsearch.helpers.vectorstore._sync._utils import model_is_deployed # deployed with # https://www.elastic.co/guide/en/machine-learning/current/ml-nlp-text-emb-vector-search-example.html MODEL_ID = os.getenv("MODEL_ID", "sentence-transformers__all-minilm-l6-v2") NUM_DIMENSIONS = int(os.getenv("NUM_DIMENSIONS", "384")) def test_elasticsearch_embedding_documents(sync_client: Elasticsearch) -> None: """Test Elasticsearch embedding documents.""" if not model_is_deployed(sync_client, MODEL_ID): pytest.skip(f"{MODEL_ID} model is not deployed in ML Node, skipping test") documents = ["foo bar", "bar foo", "foo"] embedding = ElasticsearchEmbeddings( client=sync_client, user_agent="test", model_id=MODEL_ID ) output = embedding.embed_documents(documents) assert len(output) == 3 assert len(output[0]) == NUM_DIMENSIONS assert len(output[1]) == NUM_DIMENSIONS assert len(output[2]) == NUM_DIMENSIONS def test_elasticsearch_embedding_query(sync_client: Elasticsearch) -> None: """Test Elasticsearch embedding query.""" if not model_is_deployed(sync_client, MODEL_ID): pytest.skip(f"{MODEL_ID} model is not deployed in ML Node, skipping test") document = "foo bar" embedding = ElasticsearchEmbeddings( client=sync_client, user_agent="test", model_id=MODEL_ID ) output = embedding.embed_query(document) assert len(output) == NUM_DIMENSIONS def test_user_agent_default( sync_client: Elasticsearch, sync_client_request_saving: Elasticsearch ) -> None: """Test to make sure the user-agent is set correctly.""" if not model_is_deployed(sync_client, MODEL_ID): pytest.skip(f"{MODEL_ID} model is not deployed in ML Node, skipping test") embeddings = ElasticsearchEmbeddings( client=sync_client_request_saving, model_id=MODEL_ID ) expected_pattern = r"^elasticsearch-py-es/\d+\.\d+\.\d+$" got_agent = embeddings.client._headers["User-Agent"] assert ( re.match(expected_pattern, got_agent) is not None ), f"The user agent '{got_agent}' does not match the expected 
pattern." embeddings.embed_query("foo bar") requests = embeddings.client.transport.requests # type: ignore assert len(requests) == 1 got_request_agent = requests[0]["headers"]["User-Agent"] assert ( re.match(expected_pattern, got_request_agent) is not None ), f"The user agent '{got_request_agent}' does not match the expected pattern." python-elasticsearch-9.1.1/test_elasticsearch/test_server/test_vectorstore/test_vectorstore.py000066400000000000000000001175171506101734100335110ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import re from functools import partial from typing import Any, List, Optional, Union import pytest from elasticsearch import Elasticsearch, NotFoundError from elasticsearch.helpers import BulkIndexError from elasticsearch.helpers.vectorstore import ( BM25Strategy, DenseVectorScriptScoreStrategy, DenseVectorStrategy, DistanceMetric, SparseVectorStrategy, VectorStore, ) from elasticsearch.helpers.vectorstore._sync._utils import model_is_deployed from test_elasticsearch.utils import es_version from . import ConsistentFakeEmbeddings, FakeEmbeddings logging.basicConfig(level=logging.DEBUG) """ docker-compose up elasticsearch By default runs against local docker instance of Elasticsearch. To run against Elastic Cloud, set the following environment variables: - ES_CLOUD_ID - ES_API_KEY Some of the tests require the following models to be deployed in the ML Node: - elser (can be downloaded and deployed through Kibana and trained models UI) - sentence-transformers__all-minilm-l6-v2 (can be deployed through the API, loaded via eland) These tests that require the models to be deployed are skipped by default. Enable them by adding the model name to the modelsDeployed list below. 
""" ELSER_MODEL_ID = ".elser_model_2" TRANSFORMER_MODEL_ID = "sentence-transformers__all-minilm-l6-v2" class TestVectorStore: def test_search_without_metadata( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and search without metadata.""" def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { "knn": { "field": "vector_field", "filter": [], "k": 1, "num_candidates": 50, "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0], } } return query_body store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=ConsistentFakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) output = store.search(query="foo", k=1, custom_query=assert_query) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_search_without_metadata_async( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and search without metadata.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=ConsistentFakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) output = store.search(query="foo", k=1) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_add_vectors(self, sync_client: Elasticsearch, index: str) -> None: """ Test adding pre-built embeddings instead of using inference for the texts. This allows you to separate the embeddings text and the page_content for better proximity between user's question and embedded text. For example, your embedding text can be a question, whereas page_content is the answer. """ embeddings = ConsistentFakeEmbeddings() texts = ["foo1", "foo2", "foo3"] metadatas = [{"page": i} for i in range(len(texts))] embedding_vectors = embeddings.embed_documents(texts) store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=embeddings, client=sync_client, ) store.add_texts(texts=texts, vectors=embedding_vectors, metadatas=metadatas) output = store.search(query="foo1", k=1) assert [doc["_source"]["text_field"] for doc in output] == ["foo1"] assert [doc["_source"]["metadata"]["page"] for doc in output] == [0] def test_search_with_metadata(self, sync_client: Elasticsearch, index: str) -> None: """Test end to end construction and search with metadata.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=ConsistentFakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] store.add_texts(texts=texts, metadatas=metadatas) output = store.search(query="foo", k=1) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] assert [doc["_source"]["metadata"]["page"] for doc in output] == [0] output = store.search(query="bar", k=1) assert [doc["_source"]["text_field"] for doc in output] == ["bar"] assert [doc["_source"]["metadata"]["page"] for doc in output] == [1] def test_search_with_filter(self, sync_client: Elasticsearch, index: str) -> None: """Test end to end construction and search with metadata.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=FakeEmbeddings(), client=sync_client, ) texts = ["foo", "foo", "foo"] metadatas = [{"page": i} for i in range(len(texts))] store.add_texts(texts=texts, metadatas=metadatas) def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { 
"knn": { "field": "vector_field", "filter": [{"term": {"metadata.page": "1"}}], "k": 3, "num_candidates": 50, "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], } } return query_body output = store.search( query="foo", k=3, filter=[{"term": {"metadata.page": "1"}}], custom_query=assert_query, ) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] assert [doc["_source"]["metadata"]["page"] for doc in output] == [1] def test_search_script_score(self, sync_client: Elasticsearch, index: str) -> None: """Test end to end construction and search with metadata.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorScriptScoreStrategy(), embedding_service=FakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) expected_query = { "query": { "script_score": { "query": {"match_all": {}}, "script": { "source": "cosineSimilarity(params.query_vector, 'vector_field') + 1.0", # noqa: E501 "params": { "query_vector": [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ] }, }, } } } def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == expected_query return query_body output = store.search(query="foo", k=1, custom_query=assert_query) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_search_script_score_with_filter( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and search with metadata.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorScriptScoreStrategy(), embedding_service=FakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] store.add_texts(texts=texts, metadatas=metadatas) def assert_query(query_body: dict, query: Optional[str]) -> dict: expected_query = { "query": { "script_score": { "query": {"bool": {"filter": [{"term": {"metadata.page": 0}}]}}, "script": { "source": "cosineSimilarity(params.query_vector, 'vector_field') + 1.0", # noqa: E501 "params": { "query_vector": [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ] }, }, } } } assert query_body == expected_query return query_body output = store.search( query="foo", k=1, custom_query=assert_query, filter=[{"term": {"metadata.page": 0}}], ) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] assert [doc["_source"]["metadata"]["page"] for doc in output] == [0] def test_search_script_score_distance_dot_product( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and search with metadata.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorScriptScoreStrategy( distance=DistanceMetric.DOT_PRODUCT, ), embedding_service=FakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { "query": { "script_score": { "query": {"match_all": {}}, "script": { "source": """ double value = dotProduct(params.query_vector, 'vector_field'); return sigmoid(1, Math.E, -value); """, "params": { "query_vector": [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ] }, }, } } } return query_body output = store.search(query="foo", k=1, custom_query=assert_query) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_search_knn_with_hybrid_search( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and search with metadata.""" if es_version(sync_client) < (8, 14): 
pytest.skip("This test requires Elasticsearch 8.14 or newer") store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(hybrid=True), embedding_service=FakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { "retriever": { "rrf": { "retrievers": [ { "standard": { "query": { "bool": { "filter": [], "must": [ { "match": { "text_field": {"query": "foo"} } } ], } }, }, }, { "knn": { "field": "vector_field", "filter": [], "k": 1, "num_candidates": 50, "query_vector": [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ], }, }, ], } } } return query_body output = store.search(query="foo", k=1, custom_query=assert_query) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_search_knn_with_hybrid_search_rrf( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to end construction and rrf hybrid search with metadata.""" if es_version(sync_client) < (8, 14): pytest.skip("This test requires Elasticsearch 8.14 or newer") texts = ["foo", "bar", "baz"] def assert_query( query_body: dict, query: Optional[str], expected_rrf: Union[dict, bool], ) -> dict: standard_query = { "query": { "bool": { "filter": [], "must": [{"match": {"text_field": {"query": "foo"}}}], } } } knn_query = { "field": "vector_field", "filter": [], "k": 3, "num_candidates": 50, "query_vector": [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ], } if expected_rrf is not False: cmp_query_body = { "retriever": { "rrf": { "retrievers": [ {"standard": standard_query}, {"knn": knn_query}, ], } } } if isinstance(expected_rrf, dict): cmp_query_body["retriever"]["rrf"].update(expected_rrf) else: cmp_query_body = { "knn": knn_query, **standard_query, } assert query_body == cmp_query_body return query_body # 1. check query_body is okay if es_version(sync_client) >= (8, 14): rrf_test_cases: List[Union[dict, bool]] = [ True, False, {"rank_constant": 1, "rank_window_size": 5}, ] else: # for 8.13.x and older there is no retriever query, so we can only # run hybrid searches with rrf=False rrf_test_cases: List[Union[dict, bool]] = [False] for rrf_test_case in rrf_test_cases: store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(hybrid=True, rrf=rrf_test_case), embedding_service=FakeEmbeddings(), client=sync_client, ) store.add_texts(texts) # without fetch_k parameter output = store.search( query="foo", k=3, custom_query=partial(assert_query, expected_rrf=rrf_test_case), ) # 2. check query result is okay es_output = store.client.search( index=index, retriever={ "rrf": { "retrievers": [ { "knn": { "field": "vector_field", "filter": [], "k": 3, "num_candidates": 50, "query_vector": [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ], }, }, { "standard": { "query": { "bool": { "filter": [], "must": [ {"match": {"text_field": {"query": "foo"}}} ], } }, }, }, ], "rank_constant": 1, "rank_window_size": 5, } }, size=3, ) assert [o["_source"]["text_field"] for o in output] == [ e["_source"]["text_field"] for e in es_output["hits"]["hits"] ] # 3. 
check rrf default option is okay store = VectorStore( index=f"{index}_default", retrieval_strategy=DenseVectorStrategy(hybrid=True), embedding_service=FakeEmbeddings(), client=sync_client, ) store.add_texts(texts) # with fetch_k parameter output = store.search( query="foo", k=3, num_candidates=50, custom_query=partial(assert_query, expected_rrf={}), ) def test_search_knn_with_custom_query_fn( self, sync_client: Elasticsearch, index: str ) -> None: """test that custom query function is called with the query string and query body""" store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=FakeEmbeddings(), client=sync_client, ) def my_custom_query(query_body: dict, query: Optional[str]) -> dict: assert query == "foo" assert query_body == { "knn": { "field": "vector_field", "filter": [], "k": 1, "num_candidates": 50, "query_vector": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0], } } return {"query": {"match": {"text_field": {"query": "bar"}}}} """Test end to end construction and search with metadata.""" texts = ["foo", "bar", "baz"] store.add_texts(texts) output = store.search(query="foo", k=1, custom_query=my_custom_query) assert [doc["_source"]["text_field"] for doc in output] == ["bar"] def test_search_with_knn_infer_instack( self, sync_client: Elasticsearch, index: str ) -> None: """test end to end with knn retrieval strategy and inference in-stack""" if not model_is_deployed(sync_client, TRANSFORMER_MODEL_ID): pytest.skip( f"{TRANSFORMER_MODEL_ID} model not deployed in ML Node skipping test" ) text_field = "text_field" store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(model_id=TRANSFORMER_MODEL_ID), client=sync_client, ) # setting up the pipeline for inference store.client.ingest.put_pipeline( id="test_pipeline", processors=[ { "inference": { "model_id": TRANSFORMER_MODEL_ID, "field_map": {"query_field": text_field}, "target_field": "vector_query_field", } } ], ) # creating a new index with the pipeline, # not relying on langchain to create the index store.client.indices.create( index=index, mappings={ "properties": { text_field: {"type": "text_field"}, "vector_query_field": { "properties": { "predicted_value": { "type": "dense_vector", "dims": 384, "index": True, "similarity": "l2_norm", } } }, } }, settings={"index": {"default_pipeline": "test_pipeline"}}, ) # adding documents to the index texts = ["foo", "bar", "baz"] for i, text in enumerate(texts): store.client.create( index=index, id=str(i), document={text_field: text, "metadata": {}}, ) store.client.indices.refresh(index=index) def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { "knn": { "filter": [], "field": "vector_query_field.predicted_value", "k": 1, "num_candidates": 50, "query_vector_builder": { "text_embedding": { "model_id": TRANSFORMER_MODEL_ID, "model_text": "foo", } }, } } return query_body output = store.search(query="foo", k=1, custom_query=assert_query) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] output = store.search(query="bar", k=1) assert [doc["_source"]["text_field"] for doc in output] == ["bar"] def test_search_with_sparse_infer_instack( self, sync_client: Elasticsearch, index: str ) -> None: """test end to end with sparse retrieval strategy and inference in-stack""" if not model_is_deployed(sync_client, ELSER_MODEL_ID): reason = f"{ELSER_MODEL_ID} model not deployed in ML Node, skipping test" pytest.skip(reason) store = VectorStore( index=index, 
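            # ELSER is a sparse retrieval model, so this store is configured
            # without a dense embedding_service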
retrieval_strategy=SparseVectorStrategy(model_id=ELSER_MODEL_ID), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) output = store.search(query="foo", k=1) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_deployed_model_check_fails_semantic( self, sync_client: Elasticsearch, index: str ) -> None: """test that exceptions are raised if a specified model is not deployed""" with pytest.raises(NotFoundError): store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy( model_id="non-existing model ID" ), client=sync_client, ) store.add_texts(["foo", "bar", "baz"]) def test_search_bm25(self, sync_client: Elasticsearch, index: str) -> None: """Test end to end using the BM25Strategy retrieval strategy.""" store = VectorStore( index=index, retrieval_strategy=BM25Strategy(), client=sync_client, ) texts = ["foo", "bar", "baz"] store.add_texts(texts) def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { "query": { "bool": { "must": [{"match": {"text_field": {"query": "foo"}}}], "filter": [], } } } return query_body output = store.search(query="foo", k=1, custom_query=assert_query) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] def test_search_bm25_with_filter( self, sync_client: Elasticsearch, index: str ) -> None: """Test end to using the BM25Strategy retrieval strategy with metadata.""" store = VectorStore( index=index, retrieval_strategy=BM25Strategy(), client=sync_client, ) texts = ["foo", "foo", "foo"] metadatas = [{"page": i} for i in range(len(texts))] store.add_texts(texts=texts, metadatas=metadatas) def assert_query(query_body: dict, query: Optional[str]) -> dict: assert query_body == { "query": { "bool": { "must": [{"match": {"text_field": {"query": "foo"}}}], "filter": [{"term": {"metadata.page": 1}}], } } } return query_body output = store.search( query="foo", k=3, custom_query=assert_query, filter=[{"term": {"metadata.page": 1}}], ) assert [doc["_source"]["text_field"] for doc in output] == ["foo"] assert [doc["_source"]["metadata"]["page"] for doc in output] == [1] def test_delete(self, sync_client: Elasticsearch, index: str) -> None: """Test delete methods from vector store.""" store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(), embedding_service=FakeEmbeddings(), client=sync_client, ) texts = ["foo", "bar", "baz", "gni"] metadatas = [{"page": i} for i in range(len(texts))] ids = store.add_texts(texts=texts, metadatas=metadatas) output = store.search(query="foo", k=10) assert len(output) == 4 store.delete(ids=ids[1:3]) output = store.search(query="foo", k=10) assert len(output) == 2 store.delete(ids=["not-existing"]) output = store.search(query="foo", k=10) assert len(output) == 2 store.delete(ids=[ids[0]]) output = store.search(query="foo", k=10) assert len(output) == 1 store.delete(ids=[ids[3]]) output = store.search(query="gni", k=10) assert len(output) == 0 def test_indexing_exception_error( self, sync_client: Elasticsearch, index: str, caplog: pytest.LogCaptureFixture, ) -> None: """Test bulk exception logging is giving better hints.""" store = VectorStore( index=index, retrieval_strategy=BM25Strategy(), client=sync_client, ) store.client.indices.create( index=index, mappings={"properties": {}}, settings={"index": {"default_pipeline": "not-existing-pipeline"}}, ) texts = ["foo"] with pytest.raises(BulkIndexError): store.add_texts(texts) error_reason = "pipeline with id [not-existing-pipeline] does not exist" log_message = f"First error 
reason: {error_reason}" assert log_message in caplog.text def test_user_agent_default( self, sync_client_request_saving: Elasticsearch, index: str ) -> None: """Test to make sure the user-agent is set correctly.""" store = VectorStore( index=index, retrieval_strategy=BM25Strategy(), client=sync_client_request_saving, ) expected_pattern = r"^elasticsearch-py-vs/\d+\.\d+\.\d+$" got_agent = store.client._headers["User-Agent"] assert ( re.match(expected_pattern, got_agent) is not None ), f"The user agent '{got_agent}' does not match the expected pattern." texts = ["foo", "bob", "baz"] store.add_texts(texts) for request in store.client.transport.requests: # type: ignore agent = request["headers"]["User-Agent"] assert ( re.match(expected_pattern, agent) is not None ), f"The user agent '{agent}' does not match the expected pattern." def test_user_agent_custom( self, sync_client_request_saving: Elasticsearch, index: str ) -> None: """Test to make sure the user-agent is set correctly.""" user_agent = "this is THE user_agent!" store = VectorStore( user_agent=user_agent, index=index, retrieval_strategy=BM25Strategy(), client=sync_client_request_saving, ) assert store.client._headers["User-Agent"] == user_agent texts = ["foo", "bob", "baz"] store.add_texts(texts) for request in store.client.transport.requests: # type: ignore assert request["headers"]["User-Agent"] == user_agent def test_bulk_args(self, sync_client_request_saving: Any, index: str) -> None: """Test to make sure the bulk arguments work as expected.""" store = VectorStore( index=index, retrieval_strategy=BM25Strategy(), client=sync_client_request_saving, ) texts = ["foo", "bob", "baz"] store.add_texts(texts, bulk_kwargs={"chunk_size": 1}) # 1 for index exist, 1 for index create, 3 to index docs assert len(store.client.transport.requests) == 5 # type: ignore def test_max_marginal_relevance_search_errors( self, sync_client: Elasticsearch, index: str ) -> None: """Test max marginal relevance search error conditions.""" texts = ["foo", "bar", "baz"] vector_field = "vector_field" embedding_service = ConsistentFakeEmbeddings() store = VectorStore( index=index, retrieval_strategy=DenseVectorScriptScoreStrategy(), embedding_service=embedding_service, client=sync_client, ) store.add_texts(texts) # search without query embeddings vector or query with pytest.raises( ValueError, match="specify either query or query_embedding to search" ): store.max_marginal_relevance_search( vector_field=vector_field, k=3, num_candidates=3, ) # search without service no_service_store = VectorStore( index=index, retrieval_strategy=DenseVectorScriptScoreStrategy(), client=sync_client, ) with pytest.raises( ValueError, match="specify embedding_service to search with query" ): no_service_store.max_marginal_relevance_search( query=texts[0], vector_field=vector_field, k=3, num_candidates=3, ) def test_max_marginal_relevance_search( self, sync_client: Elasticsearch, index: str ) -> None: """Test max marginal relevance search.""" texts = ["foo", "bar", "baz"] vector_field = "vector_field" text_field = "text_field" query_embedding = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0] embedding_service = ConsistentFakeEmbeddings() store = VectorStore( index=index, retrieval_strategy=DenseVectorScriptScoreStrategy(), embedding_service=embedding_service, vector_field=vector_field, text_field=text_field, client=sync_client, ) store.add_texts(texts) # search with query mmr_output = store.max_marginal_relevance_search( query=texts[0], vector_field=vector_field, k=3, 
num_candidates=3, ) sim_output = store.search(query=texts[0], k=3) assert mmr_output == sim_output # search with query embeddings mmr_output = store.max_marginal_relevance_search( query_embedding=query_embedding, vector_field=vector_field, k=3, num_candidates=3, ) sim_output = store.search(query_vector=query_embedding, k=3) assert mmr_output == sim_output mmr_output = store.max_marginal_relevance_search( query=texts[0], vector_field=vector_field, k=2, num_candidates=3, ) assert len(mmr_output) == 2 assert mmr_output[0]["_source"][text_field] == texts[0] assert mmr_output[1]["_source"][text_field] == texts[1] mmr_output = store.max_marginal_relevance_search( query=texts[0], vector_field=vector_field, k=2, num_candidates=3, lambda_mult=0.1, # more diversity ) assert len(mmr_output) == 2 assert mmr_output[0]["_source"][text_field] == texts[0] assert mmr_output[1]["_source"][text_field] == texts[2] # if fetch_k < k, then the output will be less than k mmr_output = store.max_marginal_relevance_search( query=texts[0], vector_field=vector_field, k=3, num_candidates=2, ) assert len(mmr_output) == 2 def test_metadata_mapping(self, sync_client: Elasticsearch, index: str) -> None: """Test that the metadata mapping is applied.""" test_mappings = { "my_field": {"type": "keyword"}, "another_field": {"type": "text"}, } store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(distance=DistanceMetric.COSINE), embedding_service=FakeEmbeddings(), num_dimensions=10, client=sync_client, metadata_mappings=test_mappings, ) texts = ["foo", "foo", "foo"] metadatas = [{"my_field": str(i)} for i in range(len(texts))] store.add_texts(texts=texts, metadatas=metadatas) mapping_response = sync_client.indices.get_mapping(index=index) mapping_properties = mapping_response[index]["mappings"]["properties"] assert mapping_properties["vector_field"] == { "type": "dense_vector", "dims": 10, "index": True, "index_options": { "ef_construction": 100, "m": 16, "type": "int8_hnsw", }, "similarity": "cosine", } assert "metadata" in mapping_properties for key, val in test_mappings.items(): assert mapping_properties["metadata"]["properties"][key] == val def test_custom_index_settings( self, sync_client: Elasticsearch, index: str ) -> None: """Test that the custom index settings are applied.""" test_settings = { "analysis": { "tokenizer": { "custom_tokenizer": {"type": "pattern", "pattern": "[,;\\s]+"} }, "analyzer": { "custom_analyzer": { "type": "custom", "tokenizer": "custom_tokenizer", } }, } } test_mappings = { "my_field": {"type": "keyword"}, "another_field": {"type": "text", "analyzer": "custom_analyzer"}, } store = VectorStore( index=index, retrieval_strategy=DenseVectorStrategy(distance=DistanceMetric.COSINE), embedding_service=FakeEmbeddings(), num_dimensions=10, client=sync_client, metadata_mappings=test_mappings, custom_index_settings=test_settings, ) sample_texts = [ "Sample text one, with some keywords.", "Another; sample, text with; different keywords.", "Third example text, with more keywords.", ] store.add_texts(texts=sample_texts) # Fetch the actual index settings from Elasticsearch actual_settings = sync_client.indices.get_settings(index=index) # Assert that the custom settings were applied correctly custom_settings_applied = actual_settings[index]["settings"]["index"][ "analysis" ] assert ( custom_settings_applied == test_settings["analysis"] ), f"Expected custom index settings {test_settings} but got {custom_settings_applied}" def test_custom_index_settings_with_collision( self, sync_client: 
Elasticsearch, index: str ) -> None: """Test that custom index settings that collide cause an error.""" test_settings = { "default_pipeline": "my_pipeline", "analysis": { "tokenizer": { "custom_tokenizer": {"type": "pattern", "pattern": "[,;\\s]+"} }, "analyzer": { "custom_analyzer": { "type": "custom", "tokenizer": "custom_tokenizer", } }, }, } test_mappings = { "my_field": {"type": "keyword"}, "another_field": {"type": "text", "analyzer": "custom_analyzer"}, } store = VectorStore( index=index, retrieval_strategy=SparseVectorStrategy(), client=sync_client, metadata_mappings=test_mappings, custom_index_settings=test_settings, ) with pytest.raises(ValueError, match="Conflicting settings"): store.add_texts(texts=["some text"]) python-elasticsearch-9.1.1/test_elasticsearch/test_strategies.py000066400000000000000000000054141506101734100253110ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest from elasticsearch.helpers.vectorstore import ( DenseVectorScriptScoreStrategy, DenseVectorStrategy, SparseVectorStrategy, ) def test_sparse_vector_strategy_raises_errors(): strategy = SparseVectorStrategy("my_model_id") with pytest.raises(ValueError): # missing query strategy.es_query( query=None, query_vector=None, text_field="text_field", vector_field="vector_field", k=10, num_candidates=20, filter=[], ) with pytest.raises(ValueError): # query vector not allowed strategy.es_query( query="hi", query_vector=[1, 2, 3], text_field="text_field", vector_field="vector_field", k=10, num_candidates=20, filter=[], ) def test_dense_vector_strategy_raises_error(): with pytest.raises(ValueError): # unknown distance DenseVectorStrategy(hybrid=True, text_field=None) with pytest.raises(ValueError): # unknown distance DenseVectorStrategy(distance="unknown distance").es_mappings_settings( text_field="text_field", vector_field="vector_field", num_dimensions=10 ) def test_dense_vector_script_score_strategy_raises_error(): with pytest.raises(ValueError): # missing query vector DenseVectorScriptScoreStrategy().es_query( query=None, query_vector=None, text_field="text_field", vector_field="vector_field", k=10, num_candidates=20, filter=[], ) with pytest.raises(ValueError): # unknown distance DenseVectorScriptScoreStrategy(distance="unknown distance").es_query( query=None, query_vector=[1, 2, 3], text_field="text_field", vector_field="vector_field", k=10, num_candidates=20, filter=[], ) python-elasticsearch-9.1.1/test_elasticsearch/test_transport.py000066400000000000000000000540371506101734100252000ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. 
licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import re import time import warnings from typing import Any, Dict, Optional import pytest from elastic_transport import ( ApiResponseMeta, BaseNode, HttpHeaders, NodeConfig, NodePool, ) from elastic_transport._node import NodeApiResponse from elastic_transport.client_utils import DEFAULT from elasticsearch import Elasticsearch, __versionstr__ from elasticsearch.exceptions import ( ApiError, ConnectionError, ElasticsearchWarning, UnsupportedProductError, ) class DummyNode(BaseNode): def __init__(self, config: NodeConfig): self.resp_status = config._extras.pop("status", 200) self.resp_error = config._extras.pop("exception", None) self.resp_data = config._extras.pop("data", b"{}") self.resp_headers = config._extras.pop( "headers", {"X-elastic-product": "Elasticsearch"} ) self.calls = [] super().__init__(config) def perform_request(self, *args, **kwargs): self.calls.append((args, kwargs)) if self.resp_error: raise self.resp_error return NodeApiResponse( ApiResponseMeta( status=self.resp_status, headers=HttpHeaders(self.resp_headers), http_version="1.1", duration=0.0, node=self.config, ), self.resp_data, ) class NoTimeoutConnectionPool(NodePool): def mark_dead(self, connection): pass def mark_live(self, connection): pass CLUSTER_NODES = """{ "_nodes" : { "total" : 1, "successful" : 1, "failed" : 0 }, "cluster_name" : "elasticsearch", "nodes" : { "SRZpKFZdQguhhvifmN6UVA" : { "name" : "SRZpKFZ", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : "253032b", "roles" : [ "master", "data", "ingest" ], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "1.1.1.1:123", "max_content_length_in_bytes" : 104857600 } } } }""" CLUSTER_NODES_7x_PUBLISH_HOST = """{ "_nodes" : { "total" : 1, "successful" : 1, "failed" : 0 }, "cluster_name" : "elasticsearch", "nodes" : { "SRZpKFZdQguhhvifmN6UVA" : { "name" : "SRZpKFZ", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : "253032b", "roles" : [ "master", "data", "ingest" ], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "somehost.tld/1.1.1.1:123", "max_content_length_in_bytes" : 104857600 } } } }""" CLUSTER_NODES_MASTER_ONLY = """{ "_nodes" : { "total" : 2, "successful" : 2, "failed" : 0 }, "cluster_name" : "elasticsearch", "nodes" : { "SRZpKFZdQguhhvifmN6UVA" : { "name" : "SRZpKFZa", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : "253032b", "roles" : ["master"], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "somehost.tld/1.1.1.1:123", "max_content_length_in_bytes" : 104857600 } }, "SRZpKFZdQguhhvifmN6UVB" : { "name" : "SRZpKFZb", "transport_address" : "127.0.0.1:9300", "host" : "127.0.0.1", "ip" : "127.0.0.1", "version" : "5.0.0", "build_hash" : 
"253032b", "roles" : [ "master", "data", "ingest" ], "http" : { "bound_address" : [ "[fe80::1]:9200", "[::1]:9200", "127.0.0.1:9200" ], "publish_address" : "somehost.tld/1.1.1.1:124", "max_content_length_in_bytes" : 104857600 } } } }""" class TestTransport: def test_request_timeout_extracted_from_params_and_passed(self): client = Elasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode ) client.info(params={"request_timeout": 42}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["request_timeout"] == 42 def test_opaque_id(self): client = Elasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode, opaque_id="app-1", ) client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["headers"]["x-opaque-id"] == "app-1" # Now try with an 'x-opaque-id' set on perform_request(). client.info(opaque_id="request-2") calls = client.transport.node_pool.get().calls assert 2 == len(calls) assert calls[1][0] == ("GET", "/") assert calls[1][1]["headers"]["x-opaque-id"] == "request-2" def test_custom_user_agent_on_initialization(self): client = Elasticsearch( "http://localhost:9200", headers={"user-agent": "custom/1.2.3"} ) headers = [node.config for node in client.transport.node_pool.all()][0].headers assert list(headers.keys()) == ["user-agent"] assert headers["user-agent"].startswith(f"elasticsearch-py/{__versionstr__} (") def test_request_with_custom_user_agent_header(self): client = Elasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode ) client.info(headers={"User-Agent": "my-custom-value/1.2.3"}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["headers"]["user-agent"] == "my-custom-value/1.2.3" def test_request_with_custom_user_agent_header_set_at_client_level(self): client = Elasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode, headers={"User-Agent": "my-custom-value/1.2.3"}, ) client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][0] == ("GET", "/") assert calls[0][1]["headers"]["user-agent"] == "my-custom-value/1.2.3" def test_client_meta_header(self): client = Elasticsearch("http://localhost:9200", node_class=DummyNode) client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) headers = calls[0][1]["headers"] assert re.search( r"^es=[0-9.]+p?,py=[0-9.]+p?,t=[0-9.]+p?$", headers["x-elastic-client-meta"] ) class DummyNodeWithMeta(DummyNode): _CLIENT_META_HTTP_CLIENT = ("dm", "1.2.3") client = Elasticsearch("http://localhost:9200", node_class=DummyNodeWithMeta) client.info(headers={"CustoM": "header"}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) headers = calls[0][1]["headers"] assert re.search( r"^es=[0-9.]+p?,py=[0-9.]+p?,t=[0-9.]+p?,dm=1.2.3$", headers["x-elastic-client-meta"], ) assert headers["Custom"] == "header" def test_client_meta_header_not_sent(self): client = Elasticsearch( "http://localhost:9200", meta_header=False, node_class=DummyNode ) client.info() calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert calls[0][1]["headers"] == { "accept": "application/vnd.elasticsearch+json; compatible-with=9", } def test_meta_header_type_error(self): with pytest.raises(TypeError) as e: Elasticsearch("https://localhost:9200", meta_header=1) assert str(e.value) == 
"'meta_header' must be of type bool" def test_body_surrogates_replaced_encoded_into_bytes(self): client = Elasticsearch("http://localhost:9200", node_class=DummyNode) client.search(query={"match": "你好\uda6a"}) calls = client.transport.node_pool.get().calls assert 1 == len(calls) assert ( calls[0][1]["body"] == b'{"query":{"match":"\xe4\xbd\xa0\xe5\xa5\xbd\xed\xa9\xaa"}}' ) def test_kwargs_passed_on_to_node_pool(self): dt = object() client = Elasticsearch("http://localhost:9200", dead_node_backoff_factor=dt) assert dt is client.transport.node_pool.dead_node_backoff_factor def test_custom_node_class(self): class MyConnection: def __init__(self, *_, **__): pass def perform_request(*_, **__): pass client = Elasticsearch("http://localhost:9200", node_class=MyConnection) assert 1 == len(client.transport.node_pool) assert isinstance(client.transport.node_pool.all()[0], MyConnection) def test_request_will_fail_after_x_retries(self): client = Elasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"exception": ConnectionError("abandon ship!")}, ) ], node_class=DummyNode, ) with pytest.raises(ConnectionError) as e: client.info() calls = client.transport.node_pool.get().calls assert 4 == len(calls) assert len(e.value.errors) == 3 del calls[:] with pytest.raises(ConnectionError): client.options(max_retries=5).info() calls = client.transport.node_pool.get().calls assert 6 == len(calls) def test_failed_connection_will_be_marked_as_dead(self): client = Elasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"exception": ConnectionError("abandon ship!")}, ), NodeConfig( "http", "localhost", 9201, _extras={"exception": ConnectionError("abandon ship!")}, ), ], node_class=DummyNode, ) with pytest.raises(ConnectionError): client.info() assert 0 == len(client.transport.node_pool._alive_nodes) def test_resurrected_connection_will_be_marked_as_live_on_success(self): client = Elasticsearch( [ NodeConfig("http", "localhost", 9200), NodeConfig("http", "localhost", 9201), ], node_class=DummyNode, ) node1 = client.transport.node_pool.get() node2 = client.transport.node_pool.get() assert node1 is not node2 client.transport.node_pool.mark_dead(node1) client.transport.node_pool.mark_dead(node2) assert len(client.transport.node_pool._alive_nodes) == 0 client.info() assert len(client.transport.node_pool._alive_nodes) == 1 assert len(client.transport.node_pool._dead_consecutive_failures) == 1 def test_override_mark_dead_mark_live(self): client = Elasticsearch( [ NodeConfig("http", "localhost", 9200), NodeConfig("http", "localhost", 9201), ], node_class=DummyNode, node_pool_class=NoTimeoutConnectionPool, ) node1 = client.transport.node_pool.get() node2 = client.transport.node_pool.get() assert node1 is not node2 client.transport.node_pool.mark_dead(node1) client.transport.node_pool.mark_dead(node2) assert len(client.transport.node_pool._alive_nodes) == 2 client.info() assert len(client.transport.node_pool._alive_nodes) == 2 assert len(client.transport.node_pool._dead_consecutive_failures) == 0 @pytest.mark.parametrize( ["nodes_info_response", "node_host"], [(CLUSTER_NODES, "1.1.1.1"), (CLUSTER_NODES_7x_PUBLISH_HOST, "somehost.tld")], ) def test_sniff_will_use_seed_connections(self, nodes_info_response, node_host): client = Elasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"data": nodes_info_response} ) ], node_class=DummyNode, sniff_on_start=True, ) node_configs = [node.config for node in client.transport.node_pool.all()] assert len(node_configs) == 2 assert NodeConfig("http", node_host, 
123) in node_configs def test_sniff_on_start_ignores_sniff_timeout(self): client = Elasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_on_start=True, sniff_timeout=12, meta_header=False, ) calls = client.transport.node_pool.all()[0].calls assert len(calls) == 1 assert calls[0] == ( ("GET", "/_nodes/_all/http"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9" }, "request_timeout": None, # <-- Should be None instead of 12 }, ) def test_sniff_uses_sniff_timeout(self): client = Elasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_before_requests=True, sniff_timeout=12, meta_header=False, ) client.info() calls = client.transport.node_pool.all()[0].calls assert len(calls) == 2 assert calls[0] == ( ("GET", "/_nodes/_all/http"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9" }, "request_timeout": 12, }, ) assert calls[1] == ( ("GET", "/"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9", }, "request_timeout": DEFAULT, }, ) def test_sniff_reuses_node_instances(self): client = Elasticsearch( [NodeConfig("http", "1.1.1.1", 123, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, sniff_on_start=True, ) assert len(client.transport.node_pool) == 1 client.info() assert len(client.transport.node_pool) == 1 def test_sniff_after_n_seconds(self): client = Elasticsearch( # noqa: F821 [NodeConfig("http", "localhost", 9200, _extras={"data": CLUSTER_NODES})], node_class=DummyNode, min_delay_between_sniffing=5, ) client.transport._last_sniffed_at = time.time() client.info() for _ in range(4): client.info() assert 1 == len(client.transport.node_pool) client.transport._last_sniffed_at = time.time() - 5.1 client.info() assert 2 == len(client.transport.node_pool) assert "http://1.1.1.1:123" in ( node.base_url for node in client.transport.node_pool.all() ) assert time.time() - 1 < client.transport._last_sniffed_at < time.time() + 0.01 @pytest.mark.parametrize( "kwargs", [ {"sniff_on_start": True}, {"sniff_on_node_failure": True}, {"sniff_before_requests": True}, {"sniff_timeout": 1}, ], ) def test_sniffing_disabled_on_elastic_cloud(self, kwargs): with pytest.raises(ValueError) as e: Elasticsearch( cloud_id="cluster:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5NyQ0ZmE4ODIxZTc1NjM0MDMyYmVkMWNmMjIxMTBlMmY5Ng==", **kwargs, ) assert ( str(e.value) == "Sniffing should not be enabled when connecting to Elastic Cloud" ) def test_sniffing_master_only_filtered_by_default(self): client = Elasticsearch( # noqa: F821 [ NodeConfig( "http", "localhost", 9200, _extras={"data": CLUSTER_NODES_MASTER_ONLY}, ) ], node_class=DummyNode, sniff_on_start=True, ) assert len(client.transport.node_pool) == 2 def test_sniff_node_callback(self): def sniffed_node_callback( node_info: Dict[str, Any], node_config: NodeConfig ) -> Optional[NodeConfig]: return ( node_config if node_info["http"]["publish_address"].endswith(":124") else None ) client = Elasticsearch( # noqa: F821 [ NodeConfig( "http", "localhost", 9200, _extras={"data": CLUSTER_NODES_MASTER_ONLY}, ) ], node_class=DummyNode, sniff_on_start=True, sniffed_node_callback=sniffed_node_callback, ) assert len(client.transport.node_pool) == 2 ports = {node.config.port for node in client.transport.node_pool.all()} assert ports == {9200, 124} @pytest.mark.parametrize("headers", [{}, 
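    # no product header at all, or a wrong one: both must fail the product check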
{"X-elastic-product": "BAD HEADER"}]) def test_unsupported_product_error(headers): client = Elasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"headers": headers})], meta_header=False, node_class=DummyNode, ) with pytest.raises(UnsupportedProductError) as e: client.info() assert str(e.value) == ( "The client noticed that the server is not Elasticsearch " "and we do not support this unknown product" ) calls = client.transport.node_pool.get().calls assert len(calls) == 1 assert calls[0] == ( ("GET", "/"), { "body": None, "headers": { "accept": "application/vnd.elasticsearch+json; compatible-with=9", }, "request_timeout": DEFAULT, }, ) @pytest.mark.parametrize("status", [401, 403, 413, 500]) def test_unsupported_product_error_not_raised_on_non_2xx(status): client = Elasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={"headers": {}, "status": status} ) ], meta_header=False, node_class=DummyNode, ) try: client.info() except UnsupportedProductError: assert False, "Raised UnsupportedProductError" except ApiError as e: assert e.meta.status == status @pytest.mark.parametrize("status", [404, 500]) def test_api_error_raised_before_product_error(status): client = Elasticsearch( [ NodeConfig( "http", "localhost", 9200, _extras={ "headers": {"X-elastic-product": "BAD HEADER"}, "status": status, }, ) ], meta_header=False, node_class=DummyNode, ) with pytest.raises(ApiError) as e: client.info() assert not isinstance(e.value, UnsupportedProductError) assert e.value.status_code == status calls = client.transport.node_pool.get().calls assert len(calls) == 1 assert calls[0][0] == ("GET", "/") @pytest.mark.parametrize( "headers", [ { "Warning": '299 Elasticsearch-8.0.0-SNAPSHOT-ad975cacd240b3329e160673c432e768dcd7899a "[xpack.monitoring.history.duration] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."', "X-elastic-product": "Elasticsearch", }, { "Warning": '299 Elasticsearch-8.0.0-SNAPSHOT-ad975cacd240b3329e160673c432e768dcd7899a "[xpack.monitoring.history.duration] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.", 299 Elasticsearch-8.0.0-SNAPSHOT-ad975cacd240b3329e160673c432e768dcd7899a "[xpack.monitoring.history.duration2] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."', "X-elastic-product": "Elasticsearch", }, ], ) def test_warning_header(headers): client = Elasticsearch( [NodeConfig("http", "localhost", 9200, _extras={"headers": headers})], meta_header=False, node_class=DummyNode, ) with warnings.catch_warnings(record=True) as w: client.info() assert len(w) == headers["Warning"].count("299") assert w[0].category == ElasticsearchWarning assert ( str(w[0].message) == "[xpack.monitoring.history.duration] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." ) python-elasticsearch-9.1.1/test_elasticsearch/test_types/000077500000000000000000000000001506101734100237255ustar00rootroot00000000000000python-elasticsearch-9.1.1/test_elasticsearch/test_types/README.md000066400000000000000000000003561506101734100252100ustar00rootroot00000000000000# Type Hints All of these scripts are used to test the type hinting distributed with the `elasticsearch` package. 
These scripts simulate normal usage of the client and are run through `mypy --strict` as a part of continuous integration. python-elasticsearch-9.1.1/test_elasticsearch/test_types/aliased_types.py000066400000000000000000000104231506101734100271250ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, AsyncGenerator, Dict, Generator from elastic_transport import Transport from elasticsearch8 import AsyncElasticsearch, Elasticsearch from elasticsearch8.helpers import ( async_bulk, async_reindex, async_scan, async_streaming_bulk, bulk, reindex, scan, streaming_bulk, ) es = Elasticsearch( [{"host": "localhost", "port": 9443}], transport_class=Transport, sniff_on_start=True, sniff_timeout=1, max_retries=1, retry_on_status={100, 400, 503}, retry_on_timeout=True, ) def sync_gen() -> Generator[Dict[Any, Any], None, None]: yield {} def scan_types() -> None: for _ in scan( es, query={"query": {"match_all": {}}}, request_timeout=10, clear_scroll=True, scroll_kwargs={"request_timeout": 10}, ): pass for _ in scan( es, raise_on_error=False, preserve_order=False, scroll="10m", size=10, request_timeout=10.0, ): pass def streaming_bulk_types() -> None: for _ in streaming_bulk(es, sync_gen()): pass for _ in streaming_bulk(es, sync_gen().__iter__()): pass for _ in streaming_bulk(es, [{"key": "value"}]): pass def bulk_types() -> None: _, _ = bulk(es, sync_gen()) _, _ = bulk(es, sync_gen().__iter__()) _, _ = bulk(es, [{"key": "value"}]) def reindex_types() -> None: _, _ = reindex( es, "src-index", "target-index", query={"query": {"match": {"key": "val"}}} ) _, _ = reindex( es, source_index="src-index", target_index="target-index", target_client=es ) _, _ = reindex( es, "src-index", "target-index", chunk_size=1, scroll="10m", scan_kwargs={"request_timeout": 10}, bulk_kwargs={"request_timeout": 10}, ) es2 = AsyncElasticsearch( [{"host": "localhost", "port": 9443}], sniff_on_start=True, sniff_timeout=1, max_retries=1, retry_on_status={100, 400, 503}, retry_on_timeout=True, ) async def async_gen() -> AsyncGenerator[Dict[Any, Any], None]: yield {} async def async_scan_types() -> None: async for _ in async_scan( es2, query={"query": {"match_all": {}}}, request_timeout=10, clear_scroll=True, scroll_kwargs={"request_timeout": 10}, ): pass async for _ in async_scan( es2, raise_on_error=False, preserve_order=False, scroll="10m", size=10, request_timeout=10.0, ): pass async def async_streaming_bulk_types() -> None: async for _ in async_streaming_bulk(es2, async_gen()): pass async for _ in async_streaming_bulk(es2, async_gen().__aiter__()): pass async for _ in async_streaming_bulk(es2, [{"key": "value"}]): pass async def async_bulk_types() -> None: _, _ = await async_bulk(es2, async_gen()) _, _ = await async_bulk(es2, async_gen().__aiter__()) _, _ = await 
async_bulk(es2, [{}]) async def async_reindex_types() -> None: _, _ = await async_reindex( es2, "src-index", "target-index", query={"query": {"match": {"key": "val"}}} ) _, _ = await async_reindex( es2, source_index="src-index", target_index="target-index", target_client=es2 ) _, _ = await async_reindex( es2, "src-index", "target-index", chunk_size=1, scroll="10m", scan_kwargs={"request_timeout": 10}, bulk_kwargs={"request_timeout": 10}, ) python-elasticsearch-9.1.1/test_elasticsearch/test_types/async_types.py000066400000000000000000000057111506101734100266440ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, AsyncGenerator, Dict from elasticsearch import AsyncElasticsearch from elasticsearch.helpers import ( async_bulk, async_reindex, async_scan, async_streaming_bulk, ) es = AsyncElasticsearch( [{"host": "localhost", "port": 9443}], sniff_on_start=True, sniff_timeout=1, max_retries=1, retry_on_status={100, 400, 503}, retry_on_timeout=True, ) async def main() -> None: await es.options( request_timeout=1.0, max_retries=0, api_key="api-key-example" ).search(index="test-index") await es.options( request_timeout=1.0, max_retries=0, api_key="api-key-example" ).indices.exists(index="test-index") async def async_gen() -> AsyncGenerator[Dict[Any, Any], None]: yield {} async def async_scan_types() -> None: async for _ in async_scan( es, query={"query": {"match_all": {}}}, request_timeout=10, clear_scroll=True, scroll_kwargs={"request_timeout": 10}, ): pass async for _ in async_scan( es, raise_on_error=False, preserve_order=False, scroll="10m", size=10, request_timeout=10.0, ): pass async def async_streaming_bulk_types() -> None: async for _ in async_streaming_bulk(es, async_gen()): pass async for _ in async_streaming_bulk(es, async_gen().__aiter__()): pass async for _ in async_streaming_bulk(es, [{}]): pass async def async_bulk_types() -> None: _, _ = await async_bulk(es, async_gen()) _, _ = await async_bulk(es, async_gen().__aiter__()) _, _ = await async_bulk(es, [{}]) _, _ = await async_bulk(es, ({"key": "value"},)) async def async_reindex_types() -> None: _, _ = await async_reindex( es, "src-index", "target-index", query={"query": {"match": {"key": "val"}}} ) _, _ = await async_reindex( es, source_index="src-index", target_index="target-index", target_client=es ) _, _ = await async_reindex( es, "src-index", "target-index", chunk_size=1, scroll="10m", scan_kwargs={"request_timeout": 10}, bulk_kwargs={"request_timeout": 10}, ) python-elasticsearch-9.1.1/test_elasticsearch/test_types/sync_types.py000066400000000000000000000052021506101734100264760ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. 
See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, Dict, Generator from elasticsearch import Elasticsearch from elasticsearch.helpers import bulk, reindex, scan, streaming_bulk es = Elasticsearch( [{"host": "localhost", "port": 9443}], sniff_on_start=True, sniff_timeout=1, max_retries=1, retry_on_status={100, 400, 503}, retry_on_timeout=True, ) es.options(request_timeout=1.0, max_retries=0, api_key="api-key-example").search( index="test-index" ) es.options( request_timeout=1.0, max_retries=0, api_key="api-key-example" ).indices.exists(index="test-index") def sync_gen() -> Generator[Dict[Any, Any], None, None]: yield {} def scan_types() -> None: for _ in scan( es, query={"query": {"match_all": {}}}, request_timeout=10, clear_scroll=True, scroll_kwargs={"request_timeout": 10}, ): pass for _ in scan( es, raise_on_error=False, preserve_order=False, scroll="10m", size=10, request_timeout=10.0, ): pass def streaming_bulk_types() -> None: for _ in streaming_bulk(es, sync_gen()): pass for _ in streaming_bulk(es, sync_gen().__iter__()): pass for _ in streaming_bulk(es, [{}]): pass def bulk_types() -> None: _, _ = bulk(es, sync_gen()) _, _ = bulk(es, sync_gen().__iter__()) _, _ = bulk(es, [{}]) _, _ = bulk(es, ({"key": "value"},)) def reindex_types() -> None: _, _ = reindex( es, "src-index", "target-index", query={"query": {"match": {"key": "val"}}} ) _, _ = reindex( es, source_index="src-index", target_index="target-index", target_client=es ) _, _ = reindex( es, "src-index", "target-index", chunk_size=1, scroll="10m", scan_kwargs={"request_timeout": 10}, bulk_kwargs={"request_timeout": 10}, ) python-elasticsearch-9.1.1/test_elasticsearch/utils.py000066400000000000000000000142221506101734100232350ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
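"""Shared helpers for the test suite: Elasticsearch URL discovery, version parsing, and wiping a cluster clean between test cases."""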
import os import re import time from pathlib import Path from typing import Optional, Tuple from elasticsearch import ( ConnectionError, Elasticsearch, ) SOURCE_DIR = Path(__file__).absolute().parent.parent CA_CERTS = str(SOURCE_DIR / ".buildkite/certs/ca.crt") def es_url() -> str: """Grabs an Elasticsearch URL which can be designated via an environment variable, otherwise falling back on localhost. """ urls_to_try = [] # Try user-supplied URLs before generic localhost ones. if os.environ.get("ELASTICSEARCH_URL", ""): urls_to_try.append(os.environ["ELASTICSEARCH_URL"]) if os.environ.get("elasticsearch_url", ""): urls_to_try.append(os.environ["elasticsearch_url"]) urls_to_try.extend( [ "https://localhost:9200", "http://localhost:9200", "https://elastic:changeme@localhost:9200", "http://elastic:changeme@localhost:9200", ] ) error = None for url in urls_to_try: if url.startswith("https://"): client = Elasticsearch(url, ca_certs=CA_CERTS, verify_certs=False) else: client = Elasticsearch(url) try: # Check that we get any sort of connection first. client.info() # After we get a connection let's wait for the cluster # to be in 'yellow' state, otherwise we could start # tests too early and get failures. for _ in range(100): try: client.cluster.health(wait_for_status="yellow") break except ConnectionError: time.sleep(0.1) except ConnectionError as e: if error is None: error = str(e) else: successful_url = url break else: raise RuntimeError( "Could not connect to Elasticsearch (tried %s): %s" % (", ".join(urls_to_try), error) ) return successful_url def es_version(client) -> Tuple[int, ...]: """Determines the version number and parses the number as a tuple of ints""" resp = client.info() return parse_version(resp["version"]["number"]) def parse_version(version: Optional[str]) -> Optional[Tuple[int, ...]]: """Parses a version number string into its major, minor, patch as a tuple""" if not version: return None version_number = tuple( int(x) for x in re.search(r"^([0-9]+(?:\.[0-9]+)*)", version).group(1).split(".") ) return version_number def wipe_cluster(client): """Wipes a cluster clean between test cases""" close_after_wipe = False try: # If client is async we need to replace the client # with a synchronous one.
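# (we reuse the node config from the async transport so the replacement # sync client points at the same cluster)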
from elasticsearch import AsyncElasticsearch if isinstance(client, AsyncElasticsearch): node_config = client.transport.node_pool.get().config client = Elasticsearch([node_config], verify_certs=False) close_after_wipe = True except ImportError: pass wipe_snapshots(client) wipe_data_streams(client) wipe_indices(client) client.indices.delete_template(name="*") client.indices.delete_index_template(name="*") wipe_cluster_settings(client) wait_for_cluster_state_updates_to_finish(client) if close_after_wipe: client.close() def wipe_cluster_settings(client): settings = client.cluster.get_settings() new_settings = {} for name, value in settings.items(): if value: new_settings.setdefault(name, {}) for key in value.keys(): new_settings[name][key + ".*"] = None if new_settings: client.cluster.put_settings(body=new_settings) def wipe_snapshots(client): """Deletes all the snapshots and repositories from the cluster""" in_progress_snapshots = [] repos = client.snapshot.get_repository(name="_all") for repo_name, repo in repos.items(): if repo_name in {"found-snapshots"}: continue if repo["type"] == "fs": snapshots = client.snapshot.get( repository=repo_name, snapshot="_all", ignore_unavailable=True ) for snapshot in snapshots["snapshots"]: if snapshot["state"] == "IN_PROGRESS": in_progress_snapshots.append(snapshot) else: client.options(ignore_status=404).snapshot.delete( repository=repo_name, snapshot=snapshot["snapshot"], ) client.options(ignore_status=404).snapshot.delete_repository(name=repo_name) assert in_progress_snapshots == [] def wipe_data_streams(client): try: client.indices.delete_data_stream(name="*", expand_wildcards="all") except Exception: client.indices.delete_data_stream(name="*") def wipe_indices(client): indices = client.cat.indices().strip().splitlines() if len(indices) > 0: index_names = [i.split()[2] for i in indices] client.options(ignore_status=404).indices.delete( index=",".join(index_names), expand_wildcards="all", ) def wait_for_cluster_state_updates_to_finish(client, timeout=30): end_time = time.time() + timeout while time.time() < end_time: if not client.cluster.pending_tasks().get("tasks", ()): break python-elasticsearch-9.1.1/utils/000077500000000000000000000000001506101734100170115ustar00rootroot00000000000000python-elasticsearch-9.1.1/utils/build-dists.py000066400000000000000000000235341506101734100216150ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """A command line tool for building and verifying releases Can be used for building both 'elasticsearch' and 'elasticsearchX' dists. Only requires 'name' in 'pyproject.toml' and the directory to be changed. 
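For example, `python utils/build-dists.py` builds and tests the dists from the current checkout, while `python utils/build-dists.py 9.1.1-SNAPSHOT` (the version string supplied by the release manager) first validates that version against 'elasticsearch/_version.py' before building.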
""" import contextlib import os import re import shlex import shutil import sys import tempfile base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) tmp_dir = None @contextlib.contextmanager def set_tmp_dir(): global tmp_dir tmp_dir = tempfile.mkdtemp() yield tmp_dir shutil.rmtree(tmp_dir) tmp_dir = None def run(*argv, expect_exit_code=0): try: prev_dir = os.getcwd() if tmp_dir is None: os.chdir(base_dir) else: os.chdir(tmp_dir) cmd = shlex.join(argv) print("$ " + cmd) exit_code = os.system(cmd) if exit_code != expect_exit_code: print( "Command exited incorrectly: should have been %d was %d" % (expect_exit_code, exit_code) ) exit(exit_code or 1) finally: os.chdir(prev_dir) def test_dist(dist): with set_tmp_dir() as tmp_dir: dist_name = re.match(r"^(elasticsearch\d*)-", os.path.basename(dist)).group(1) # Build the venv and install the dist run("python", "-m", "venv", os.path.join(tmp_dir, "venv")) venv_python = os.path.join(tmp_dir, "venv/bin/python") run( venv_python, "-m", "pip", "install", "-U", "pip", "mypy", "numpy", "pandas-stubs", "opentelemetry-api", ) run(venv_python, "-m", "pip", "install", dist) # Test the sync namespaces run(venv_python, "-c", f"from {dist_name} import Elasticsearch") run( venv_python, "-c", f"from {dist_name}.helpers import scan, bulk, streaming_bulk, reindex", ) run( venv_python, "-c", f"from {dist_name} import Elasticsearch, AsyncElasticsearch", ) run( venv_python, "-c", f"from {dist_name}.helpers import scan, bulk, streaming_bulk, reindex, async_scan, async_bulk, async_streaming_bulk, async_reindex", ) # Install aiohttp and see that async is now available run(venv_python, "-m", "pip", "install", "aiohttp") run(venv_python, "-c", f"from {dist_name} import AsyncElasticsearch") run( venv_python, "-c", f"from {dist_name}.helpers import async_scan, async_bulk, async_streaming_bulk, async_reindex", ) # Only need to test 'async_types' for non-aliased package # since 'aliased_types' tests both async and sync. if dist_name == "elasticsearch": run( venv_python, "-m", "mypy", "--strict", "--install-types", "--non-interactive", "--ignore-missing-imports", "--implicit-reexport", os.path.join(base_dir, "test_elasticsearch/test_types/async_types.py"), ) # Ensure that the namespaces are correct for the dist for suffix in ("", "1", "2", "5", "6", "7", "8", "9", "10"): distx_name = f"elasticsearch{suffix}" run( venv_python, "-c", f"import {distx_name}", expect_exit_code=256 if distx_name != dist_name else 0, ) # Check that sync types work for 'elasticsearch' and # that aliased types work for 'elasticsearchX' if dist_name == "elasticsearch": run( venv_python, "-m", "mypy", "--strict", "--install-types", "--non-interactive", "--ignore-missing-imports", "--implicit-reexport", os.path.join(base_dir, "test_elasticsearch/test_types/sync_types.py"), ) else: run( venv_python, "-m", "mypy", "--strict", "--install-types", "--non-interactive", "--ignore-missing-imports", "--implicit-reexport", os.path.join( base_dir, "test_elasticsearch/test_types/aliased_types.py" ), ) # Uninstall the dist, see that we can't import things anymore run(venv_python, "-m", "pip", "uninstall", "--yes", dist_name) run( venv_python, "-c", f"from {dist_name} import Elasticsearch", expect_exit_code=256, ) def main(): run("git", "checkout", "--", "pyproject.toml", "elasticsearch/") run("rm", "-rf", "dist") # Grab the major version to be used as a suffix. 
version_path = os.path.join(base_dir, "elasticsearch/_version.py") with open(version_path) as f: version = re.search( r"^__versionstr__\s+=\s+[\"\']([^\"\']+)[\"\']", f.read(), re.M ).group(1) major_version = version.split(".")[0] # If we're handed a version from the release manager we # should check that the version is correct or write # a new one. if len(sys.argv) >= 2: # 'build_version' is what the release manager wants, # 'expect_version' is what we're expecting to compare # the package version to before building the dists. build_version = expect_version = sys.argv[1] # Any pre-release suffixes in the version specifier mean we're making # a pre-release which will modify __versionstr__ locally # and not produce a git tag. if any(x in build_version for x in ("-SNAPSHOT", "-rc", "-alpha", "-beta")): # If a snapshot, then we add '+dev' if "-SNAPSHOT" in build_version: version = version + "+dev" # alpha/beta/rc -> aN/bN/rcN else: pre_number = re.search(r"-(a|b|rc)(?:lpha|eta|)(\d+)$", expect_version) version = version + pre_number.group(1) + pre_number.group(2) expect_version = re.sub( r"(?:-(?:SNAPSHOT|alpha\d+|beta\d+|rc\d+))+$", "", expect_version ) if expect_version.endswith(".x"): expect_version = expect_version[:-1] # For snapshots we ensure that the version in the package # at least *starts* with the version. This is to support # build_version='7.x-SNAPSHOT'. if not version.startswith(expect_version): print( "Version of package (%s) didn't match the " "expected release version (%s)" % (version, build_version) ) exit(1) # A release that will be tagged, we want # there to be no '+dev', etc. elif expect_version != version: print( "Version of package (%s) didn't match the " "expected release version (%s)" % (version, build_version) ) exit(1) for suffix in ("", major_version): run("rm", "-rf", "build/", "*.egg-info", ".eggs") # Rename the module to fit the suffix. shutil.move( os.path.join(base_dir, "elasticsearch"), os.path.join(base_dir, f"elasticsearch{suffix}"), ) # Ensure that the version within 'elasticsearch/_version.py' is correct. version_path = os.path.join(base_dir, f"elasticsearch{suffix}/_version.py") with open(version_path) as f: version_data = f.read() version_data = re.sub( r"__versionstr__ = \"[^\"]+\"", f'__versionstr__ = "{version}"', version_data, ) with open(version_path, "w") as f: f.truncate() f.write(version_data) # Rewrite pyproject.toml with the new name. pyproject_toml_path = os.path.join(base_dir, "pyproject.toml") with open(pyproject_toml_path) as f: pyproject_toml = f.read() with open(pyproject_toml_path, "w") as f: f.truncate() f.write(pyproject_toml.replace("elasticsearch", f"elasticsearch{suffix}")) # Build the sdist/wheels run("python", "-m", "build") # Clean up everything. run("git", "checkout", "--", "pyproject.toml", "elasticsearch/") if suffix: run("rm", "-rf", f"elasticsearch{suffix}/") # Test everything that got created dists = os.listdir(os.path.join(base_dir, "dist")) assert len(dists) == 4 for dist in dists: test_dist(os.path.join(base_dir, "dist", dist)) os.system('bash -c "chmod a+w dist/*"') # After this run 'python -m twine upload dist/*' print( "\n\n" "===============================\n\n" " * Releases are ready! *\n\n" "$ python -m twine upload dist/*\n\n" "===============================" ) if __name__ == "__main__": main() python-elasticsearch-9.1.1/utils/bump-version.py000066400000000000000000000052131506101734100220120ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements.
See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Command line tool which changes the branch to be ready to build and test the given Elastic stack version. """ import re import sys from pathlib import Path SOURCE_DIR = Path(__file__).absolute().parent.parent def find_and_replace(path, pattern, replace): # Does a find and replace within a file path and complains # if the given pattern isn't found in the file. with open(path) as f: old_data = f.read() if re.search(pattern, old_data, flags=re.MULTILINE) is None: print(f"Didn't find the pattern {pattern!r} in {path!s}") exit(1) new_data = re.sub(pattern, replace, old_data, flags=re.MULTILINE) with open(path, "w") as f: f.truncate() f.write(new_data) def main(): if len(sys.argv) != 2: print("usage: utils/bump-version.py [stack version]") exit(1) stack_version = sys.argv[1] try: python_version = re.search(r"^([0-9][0-9\.]*[0-9]+)", stack_version).group(1) except AttributeError: print(f"Couldn't match the given stack version {stack_version!r}") exit(1) # Pad the version value with .0 until we have # the major, minor, and patch. for _ in range(3): if len(python_version.split(".")) >= 3: break python_version += ".0" find_and_replace( path=SOURCE_DIR / "elasticsearch/_version.py", pattern=r"__versionstr__ = \"[0-9]+[0-9\.]*[0-9](?:\+dev)?\"", replace=f'__versionstr__ = "{python_version}"', ) # These values should always be the 'major.minor-SNAPSHOT' major_minor_version = ".".join(python_version.split(".")[:2]) find_and_replace( path=SOURCE_DIR / ".buildkite/pipeline.yml", pattern=r'STACK_VERSION:\s+"[0-9]+[0-9\.]*[0-9](?:\-SNAPSHOT)?"', replace=f'STACK_VERSION: "{major_minor_version}.0-SNAPSHOT"', ) if __name__ == "__main__": main() python-elasticsearch-9.1.1/utils/dsl-generator.py000066400000000000000000001264471506101734100221430ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License.
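# Downloads the Elasticsearch specification schema and renders the DSL # modules (field.py, query.py, aggs.py, response/__init__.py and types.py) # from Jinja templates; see the __main__ block at the bottom.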
import json import re import subprocess import textwrap from urllib.error import HTTPError from urllib.request import urlopen from jinja2 import Environment, PackageLoader, select_autoescape jinja_env = Environment( loader=PackageLoader("utils"), autoescape=select_autoescape(), trim_blocks=True, lstrip_blocks=True, ) field_py = jinja_env.get_template("field.py.tpl") query_py = jinja_env.get_template("query.py.tpl") aggs_py = jinja_env.get_template("aggs.py.tpl") response_init_py = jinja_env.get_template("response.__init__.py.tpl") types_py = jinja_env.get_template("types.py.tpl") # map with name replacements for Elasticsearch attributes PROP_REPLACEMENTS = {"from": "from_", "global": "global_"} # map with Elasticsearch type replacements # keys and values are given in "{namespace}:{name}" format TYPE_REPLACEMENTS = { "_types.query_dsl:DistanceFeatureQuery": "_types.query_dsl:DistanceFeatureQueryBase", } # some aggregation types are complicated to determine from the schema, so they # have their correct type here AGG_TYPES = { "bucket_count_ks_test": "Pipeline", "bucket_correlation": "Pipeline", "bucket_sort": "Bucket", "categorize_text": "Bucket", "filter": "Bucket", "moving_avg": "Pipeline", "variable_width_histogram": "Bucket", } def property_to_class_name(name): return "".join([w.title() if w != "ip" else "IP" for w in name.split("_")]) def wrapped_doc(text, width=70, initial_indent="", subsequent_indent=""): """Formats a docstring as a list of lines of up to the requested width.""" return textwrap.wrap( text.replace("\n", " "), width=width, initial_indent=initial_indent, subsequent_indent=subsequent_indent, ) def add_dict_type(type_): """Add Dict[str, Any] to a Python type hint.""" if type_.startswith("Union["): type_ = f"{type_[:-1]}, Dict[str, Any]]" else: type_ = f"Union[{type_}, Dict[str, Any]]" return type_ def add_seq_dict_type(type_): """Add Sequence[Dict[str, Any]] to a Python type hint.""" if type_.startswith("Union["): type_ = f"{type_[:-1]}, Sequence[Dict[str, Any]]]" else: type_ = f"Union[{type_}, Sequence[Dict[str, Any]]]" return type_ def add_not_set(type_): """Add DefaultType to a Python type hint.""" if type_.startswith("Union["): type_ = f'{type_[:-1]}, "DefaultType"]' else: type_ = f'Union[{type_}, "DefaultType"]' return type_ def type_for_types_py(type_): """Converts a type rendered in a generic way to the format needed in the types.py module. """ type_ = type_.replace('"DefaultType"', "DefaultType") type_ = type_.replace('"InstrumentedField"', "InstrumentedField") type_ = re.sub(r'"(function\.[a-zA-Z0-9_]+)"', r"\1", type_) type_ = re.sub(r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', type_) type_ = re.sub(r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", type_) return type_ class ElasticsearchSchema: """Operations related to the Elasticsearch schema.""" def __init__(self, version="main"): response = None for branch in [version, "main"]: url = f"https://raw.githubusercontent.com/elastic/elasticsearch-specification/{branch}/output/schema/schema.json" try: response = urlopen(url) print(f"Initializing code generation with '{branch}' specification.") break except HTTPError: continue if not response: raise RuntimeError("Could not download Elasticsearch schema") self.schema = json.loads(response.read()) # self.interfaces collects interfaces that are seen while traversing the schema. # Any interfaces collected here are then rendered as Python in the # types.py module.
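# self.response_interfaces tracks the subset of interfaces used in API # responses, which are rendered without Dict alternatives or default values.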
self.interfaces = [] self.response_interfaces = [] def find_type(self, name, namespace=None): for t in self.schema["types"]: if t["name"]["name"] == name and ( namespace is None or t["name"]["namespace"] == namespace ): return t def inherits_from(self, type_, name, namespace=None): while "inherits" in type_: type_ = self.find_type( type_["inherits"]["type"]["name"], type_["inherits"]["type"]["namespace"], ) if type_["name"]["name"] == name and ( namespace is None or type_["name"]["namespace"] == namespace ): return True return False def get_python_type(self, schema_type, for_response=False): """Obtain Python typing details for a given schema type This method returns a tuple. The first element is a string with the Python type hint. The second element is a dictionary with Python DSL specific typing details to be stored in the DslBase._param_defs attribute (or None if the type does not need to be in _param_defs). When `for_response` is `False`, any new interfaces that are discovered are registered to be generated in "request" style, with alternative Dict type hints and default values. If `for_response` is `True`, interfaces are generated just with their declared type, without Dict alternative and without defaults, to help type checkers be more effective at parsing response expressions. """ if schema_type["kind"] == "instance_of": type_name = schema_type["type"] if type_name["namespace"] in ["_types", "internal", "_builtins"]: if type_name["name"] in ["integer", "uint", "long", "ulong"]: return "int", None elif type_name["name"] in ["number", "float", "double"]: return "float", None elif type_name["name"] == "string": return "str", None elif type_name["name"] == "boolean": return "bool", None elif type_name["name"] == "binary": return "bytes", None elif type_name["name"] == "null": return "None", None elif type_name["name"] == "Field": if for_response: return "str", None else: return 'Union[str, "InstrumentedField"]', None else: # not an instance of a native type, so we get the type and try again return self.get_python_type( self.find_type(type_name["name"], type_name["namespace"]), for_response=for_response, ) elif ( type_name["namespace"] == "_types.query_dsl" and type_name["name"] == "QueryContainer" ): # QueryContainer maps to the DSL's Query class return "Query", {"type": "query"} elif ( type_name["namespace"] == "_global.search._types" and type_name["name"] == "SearchRequestBody" ): # we currently do not provide specific typing for this one return "Dict[str, Any]", None elif ( type_name["namespace"] == "_types.query_dsl" and type_name["name"] == "FunctionScoreContainer" ): # FunctionScoreContainer maps to the DSL's ScoreFunction class return "ScoreFunction", {"type": "score_function"} elif ( type_name["namespace"] == "_types.aggregations" and type_name["name"] == "Buckets" ): if for_response: return "Union[Sequence[Any], Dict[str, Any]]", None else: return "Dict[str, Query]", {"type": "query", "hash": True} elif ( type_name["namespace"] == "_types.aggregations" and type_name["name"] == "CompositeAggregationSource" ): # CompositeAggregationSource maps to the DSL's Agg class return "Agg[_R]", None else: # for any other instances we get the type and recurse type_ = self.find_type(type_name["name"], type_name["namespace"]) if type_: return self.get_python_type(type_, for_response=for_response) elif schema_type["kind"] == "type_alias": # for an alias, we use the aliased type return self.get_python_type(schema_type["type"], for_response=for_response) elif schema_type["kind"] == "array_of": #
for arrays we use Sequence[element_type] type_, param = self.get_python_type( schema_type["value"], for_response=for_response ) return f"Sequence[{type_}]", {**param, "multi": True} if param else None elif schema_type["kind"] == "dictionary_of": # for dicts we use Mapping[key_type, value_type] key_type, key_param = self.get_python_type( schema_type["key"], for_response=for_response ) value_type, value_param = self.get_python_type( schema_type["value"], for_response=for_response ) return f"Mapping[{key_type}, {value_type}]", ( {**value_param, "hash": True} if value_param else None ) elif schema_type["kind"] == "union_of": if ( len(schema_type["items"]) == 2 and schema_type["items"][0]["kind"] == "instance_of" and schema_type["items"][1]["kind"] == "array_of" and schema_type["items"][0] == schema_type["items"][1]["value"] ): # special kind of unions in the form Union[type, Sequence[type]] type_, param = self.get_python_type( schema_type["items"][0], for_response=for_response ) if schema_type["items"][0]["type"]["name"] in [ "CompletionSuggestOption", "PhraseSuggestOption", "TermSuggestOption", ]: # for suggest types we simplify this type and return just the array form return ( f"Sequence[{type_}]", ({"type": param["type"], "multi": True} if param else None), ) else: # for all other types we produce a union with the two alternatives return ( f"Union[{type_}, Sequence[{type_}]]", ({"type": param["type"], "multi": True} if param else None), ) elif ( len(schema_type["items"]) == 2 and schema_type["items"][0]["kind"] == "instance_of" and schema_type["items"][1]["kind"] == "instance_of" and schema_type["items"][0]["type"] == {"name": "T", "namespace": "_spec_utils.PipeSeparatedFlags"} and schema_type["items"][1]["type"] == {"name": "string", "namespace": "_builtins"} ): # for now we treat PipeSeparatedFlags as a special case if "PipeSeparatedFlags" not in self.interfaces: self.interfaces.append("PipeSeparatedFlags") return '"types.PipeSeparatedFlags"', None else: # generic union type types = list( dict.fromkeys( # eliminate duplicates [ self.get_python_type(t, for_response=for_response) for t in schema_type["items"] ] ) ) if len(types) == 1: return types[0] return "Union[" + ", ".join([type_ for type_, _ in types]) + "]", None elif schema_type["kind"] == "enum": # enums are mapped to Literal[member, ...]
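# e.g. an enum with members "asc" and "desc" becomes Literal["asc", "desc"]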
t = ( "Literal[" + ", ".join( [f"\"{member['name']}\"" for member in schema_type["members"]] ) + "]" ) if {"name": "true"} in schema_type["members"] and { "name": "false" } in schema_type["members"]: # this is a boolean that was later upgraded to an enum, so we # should also allow bools t = f"Union[{t}, bool]" return t, None elif schema_type["kind"] == "interface": if schema_type["name"]["namespace"] == "_types.query_dsl": # handle specific DSL classes explicitly to map to existing # Python DSL classes if schema_type["name"]["name"].endswith("RangeQuery"): return '"wrappers.Range[Any]"', None elif schema_type["name"]["name"].endswith("ScoreFunction"): name = schema_type["name"]["name"].removesuffix("Function") return f'"function.{name}"', None elif schema_type["name"]["name"].endswith("DecayFunction"): return '"function.DecayFunction"', None elif schema_type["name"]["name"].endswith("Function"): return f"\"function.{schema_type['name']['name']}\"", None elif schema_type["name"]["namespace"] == "_types.analysis" and schema_type[ "name" ]["name"].endswith("Analyzer"): # not expanding analyzers at this time, maybe in the future return "str, Dict[str, Any]", None elif schema_type["name"]["namespace"] == "_types.aggregations": if ( schema_type["name"]["name"].endswith("AggregationRange") or schema_type["name"]["name"] == "DateRangeExpression" ) and schema_type["name"]["name"] != "IpRangeAggregationRange": return '"wrappers.AggregationRange"', None # to handle other interfaces we generate a type of the same name # and add the interface to the interfaces.py module if schema_type["name"]["name"] not in self.interfaces: self.interfaces.append(schema_type["name"]["name"]) if for_response: self.response_interfaces.append(schema_type["name"]["name"]) return f"\"types.{schema_type['name']['name']}\"", None elif schema_type["kind"] == "user_defined_value": # user_defined_value maps to Python's Any type return "Any", None raise RuntimeError(f"Cannot find Python type for {schema_type}") def add_attribute(self, k, arg, for_types_py=False, for_response=False): """Add an attribute to the internal representation of a class. This method adds the argument `arg` to the data structure for a class stored in `k`. In particular, the argument is added to the `k["args"]` list, making sure required arguments are first in the list. If the argument is of a type that needs Python DSL specific typing details to be stored in the DslBase._param_defs attribute, then this is added to `k["params"]`. When `for_types_py` is `True`, type hints are formatted in the most convenient way for the types.py file. When possible, double quotes are removed from types, and for types that are in the same file the quotes are kept to prevent forward references, but the "types." namespace is removed. When `for_types_py` is `False`, all non-native types use quotes and are namespaced. When `for_response` is `True`, type hints are not given the optional dictionary representation, nor the `DefaultType` used for omitted attributes. """ try: type_, param = self.get_python_type(arg["type"], for_response=for_response) except RuntimeError: type_ = "Any" param = None if not for_response: if type_ != "Any": if ( 'Sequence["types.' in type_ or 'Sequence["wrappers.AggregationRange' in type_ ): type_ = add_seq_dict_type(type_) # interfaces can be given as dicts elif "types." 
in type_ or "wrappers.AggregationRange" in type_: type_ = add_dict_type(type_) # interfaces can be given as dicts type_ = add_not_set(type_) if for_types_py: type_ = type_for_types_py(type_) required = "(required) " if arg["required"] else "" server_default = ( f" Defaults to `{arg['serverDefault']}` if omitted." if arg.get("serverDefault") else "" ) doc = wrapped_doc( f":arg {arg['name']}: {required}{arg.get('description', '')}{server_default}", subsequent_indent=" ", ) arg = { "name": PROP_REPLACEMENTS.get(arg["name"], arg["name"]), "type": type_, "doc": doc, "required": arg["required"], } if param is not None: param = {"name": arg["name"], "param": param} if arg["required"]: # insert in the right place so that all required arguments # appear at the top of the argument list i = 0 for i in range(len(k["args"]) + 1): if i == len(k["args"]): break if k["args"][i].get("positional"): continue if k["args"][i]["required"] is False: break k["args"].insert(i, arg) else: k["args"].append(arg) if param and "params" in k: k["params"].append(param) def add_behaviors(self, type_, k, for_types_py=False, for_response=False): """Add behaviors reported in the specification of the given type to the class representation. """ if "behaviors" in type_: for behavior in type_["behaviors"]: if ( behavior["type"]["name"] != "AdditionalProperty" or behavior["type"]["namespace"] != "_spec_utils" ): # we do not support this behavior, so we ignore it continue key_type, _ = self.get_python_type( behavior["generics"][0], for_response=for_response ) if "InstrumentedField" in key_type: value_type, _ = self.get_python_type( behavior["generics"][1], for_response=for_response ) if for_types_py: value_type = value_type.replace('"DefaultType"', "DefaultType") value_type = value_type.replace( '"InstrumentedField"', "InstrumentedField" ) value_type = re.sub( r'"(function\.[a-zA-Z0-9_]+)"', r"\1", value_type ) value_type = re.sub( r'"types\.([a-zA-Z0-9_]+)"', r'"\1"', value_type ) value_type = re.sub( r'"(wrappers\.[a-zA-Z0-9_]+)"', r"\1", value_type ) k["args"].append( { "name": "_field", "type": add_not_set(key_type), "doc": [":arg _field: The field to use in this query."], "required": False, "positional": True, } ) k["args"].append( { "name": "_value", "type": add_not_set(add_dict_type(value_type)), "doc": [":arg _value: The query value for the field."], "required": False, "positional": True, } ) k["is_single_field"] = True else: raise RuntimeError( f"Non-field AdditionalProperty are not supported for interface {type_['name']['namespace']}:{type_['name']['name']}." ) def property_to_python_class(self, p): """Return a dictionary with template data necessary to render a schema property as a Python class. Used for "container" sub-classes such as `QueryContainer`, where each sub-class is represented by a Python DSL class. 
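A single schema property can expand into more than one class when its type is a union; each union member is then rendered as a subclass of the main class.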
The format is as follows: ```python { "property_name": "the name of the property", "name": "the class name to use for the property", "docstring": "the formatted docstring as a list of strings", "args": [ # a Python description of each class attribute "name": "the name of the attribute", "type": "the Python type hint for the attribute", "doc": ["formatted lines of documentation to add to class docstring"], "required": bool, "positional": bool, ], "params": [ "name": "the attribute name", "param": "the param dictionary to include in `_param_defs` for the class", ], # a DSL-specific description of interesting attributes "is_single_field": bool # True for single-key dicts with field key "is_multi_field": bool # True for multi-key dicts with field keys } ``` """ k = { "property_name": p["name"], "name": property_to_class_name(p["name"]), } k["docstring"] = wrapped_doc(p.get("description") or "") other_classes = [] kind = p["type"]["kind"] if kind == "instance_of": namespace = p["type"]["type"]["namespace"] name = p["type"]["type"]["name"] if f"{namespace}:{name}" in TYPE_REPLACEMENTS: namespace, name = TYPE_REPLACEMENTS[f"{namespace}:{name}"].split(":") if name == "QueryContainer" and namespace == "_types.query_dsl": type_ = { "kind": "interface", "properties": [p], } else: type_ = self.find_type(name, namespace) if p["name"] in AGG_TYPES: k["parent"] = AGG_TYPES[p["name"]] if type_["kind"] == "interface": # set the correct parent for bucket and pipeline aggregations if self.inherits_from( type_, "PipelineAggregationBase", "_types.aggregations" ): k["parent"] = "Pipeline" elif self.inherits_from( type_, "BucketAggregationBase", "_types.aggregations" ): k["parent"] = "Bucket" # generate class attributes k["args"] = [] k["params"] = [] self.add_behaviors(type_, k) while True: for arg in type_["properties"]: self.add_attribute(k, arg) if "inherits" in type_ and "type" in type_["inherits"]: type_ = self.find_type( type_["inherits"]["type"]["name"], type_["inherits"]["type"]["namespace"], ) else: break elif type_["kind"] == "type_alias": if type_["type"]["kind"] == "union_of": # for unions we create sub-classes for other in type_["type"]["items"]: other_class = self.interface_to_python_class( other["type"]["name"], other["type"]["namespace"], for_types_py=False, ) other_class["parent"] = k["name"] other_classes.append(other_class) else: raise RuntimeError( "Cannot generate code for instances of type_alias instances that are not unions." ) else: raise RuntimeError( f"Cannot generate code for instances of kind '{type_['kind']}'" ) elif kind == "dictionary_of": key_type, _ = self.get_python_type(p["type"]["key"]) if "InstrumentedField" in key_type: value_type, _ = self.get_python_type(p["type"]["value"]) if p["type"]["singleKey"]: # special handling for single-key dicts with field key k["args"] = [ { "name": "_field", "type": add_not_set(key_type), "doc": [":arg _field: The field to use in this query."], "required": False, "positional": True, }, { "name": "_value", "type": add_not_set(add_dict_type(value_type)), "doc": [":arg _value: The query value for the field."], "required": False, "positional": True, }, ] k["is_single_field"] = True else: # special handling for multi-key dicts with field keys k["args"] = [ { "name": "_fields", "type": f"Optional[Mapping[{key_type}, {value_type}]]", "doc": [ ":arg _fields: A dictionary of fields with their values." 
], "required": False, "positional": True, }, ] k["is_multi_field"] = True else: raise RuntimeError(f"Cannot generate code for type {p['type']}") else: raise RuntimeError(f"Cannot generate code for type {p['type']}") return [k] + other_classes def interface_to_python_class( self, interface, namespace=None, *, for_types_py=True, for_response=False, ): """Return a dictionary with template data necessary to render an interface a Python class. This is used to render interfaces that are referenced by container classes. The current list of rendered interfaces is passed as a second argument to allow this method to add more interfaces to it as they are discovered. The returned format is as follows: ```python { "name": "the class name to use for the interface class", "parent": "the parent class name", "args": [ # a Python description of each class attribute "name": "the name of the attribute", "type": "the Python type hint for the attribute", "doc": ["formatted lines of documentation to add to class docstring"], "required": bool, "positional": bool, ], "buckets_as_dict": "type" # optional, only present in aggregation response # classes that have buckets that can have a list # or dict representation } ``` """ type_ = self.find_type(interface, namespace) if type_["kind"] not in ["interface", "response"]: raise RuntimeError(f"Type {interface} is not an interface") if type_["kind"] == "response": # we consider responses as interfaces because they also have properties # but the location of the properties is different type_ = type_["body"] k = {"name": interface, "for_response": for_response, "args": []} k["docstring"] = wrapped_doc(type_.get("description") or "") self.add_behaviors( type_, k, for_types_py=for_types_py, for_response=for_response ) generics = [] while True: for arg in type_["properties"]: if interface == "ResponseBody" and arg["name"] == "hits": k["args"].append( { "name": "hits", "type": "Sequence[_R]", "doc": [":arg hits: search results"], "required": arg["required"], } ) elif interface == "ResponseBody" and arg["name"] == "aggregations": # Aggregations are tricky because the DSL client uses a # flexible representation that is difficult to generate # from the schema. # To handle this we let the generator do its work by calling # `add_attribute()`, but then we save the generated attribute # apart and replace it with the DSL's `AggResponse` class. # The generated type is then used in type hints in variables # and methods of this class. self.add_attribute( k, arg, for_types_py=for_types_py, for_response=for_response ) k["aggregate_type"] = ( k["args"][-1]["type"] .split("Mapping[str, ")[1] .rsplit("]", 1)[0] ) k["args"][-1] = { "name": "aggregations", "type": '"AggResponse[_R]"', "doc": [":arg aggregations: aggregation results"], "required": arg["required"], } elif ( "name" in type_ and type_["name"]["name"] == "MultiBucketAggregateBase" and arg["name"] == "buckets" ): # Also during aggregation response generation, the "buckets" # attribute that many aggregation responses have is very # complex, supporting over a dozen different aggregation # types via generics, each in array or object configurations. # Typing this attribute proved very difficult. A solution # that worked with mypy and pyright is to type "buckets" # for the list form, and create a `buckets_as_dict` # property that is typed appropriately for accessing the # buckets in dictionary form. 
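# (For example, with a hypothetical StringTermsBucket generic, `buckets` is # typed as Sequence[StringTermsBucket] and `buckets_as_dict` exposes the # dictionary form.)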
# The generic type is assumed to be the first in the list, # which is a simplification that should be improved when a # more complete implementation of generics is added. if generics[0]["type"]["name"] == "Void": generic_type = "Any" else: _g = self.find_type( generics[0]["type"]["name"], generics[0]["type"]["namespace"], ) generic_type, _ = self.get_python_type( _g, for_response=for_response ) generic_type = type_for_types_py(generic_type) k["args"].append( { "name": arg["name"], # for the type we only include the array form, since # this client does not request the dict form "type": f"Sequence[{generic_type}]", "doc": [ ":arg buckets: (required) the aggregation buckets as a list" ], "required": True, } ) k["buckets_as_dict"] = generic_type elif namespace == "_types.mapping": if arg["name"] in ["fields", "properties"]: # Python DSL provides a high level representation for the # "fields" and 'properties' properties that many types support k["args"].append( { "name": arg["name"], "type": 'Union[Mapping[str, Field], "DefaultType"]', "doc": [f":arg {arg['name']}:"], "required": False, } ) if "params" not in k: k["params"] = [] k["params"].append( { "name": arg["name"], "param": {"type": "field", "hash": True}, } ) else: # the Python DSL also provides implementations of analyzers # and normalizers, so here we make sure these are noted as # params and have an appropriate type hint. self.add_attribute( k, arg, for_types_py=for_types_py, for_response=for_response ) if arg["name"].endswith("analyzer"): if "params" not in k: k["params"] = [] k["params"].append( {"name": arg["name"], "param": {"type": "analyzer"}} ) k["args"][-1]["type"] = 'Union[str, DslBase, "DefaultType"]' elif arg["name"].endswith("normalizer"): if "params" not in k: k["params"] = [] k["params"].append( {"name": arg["name"], "param": {"type": "normalizer"}} ) k["args"][-1]["type"] = 'Union[str, DslBase, "DefaultType"]' else: if interface == "Hit" and arg["name"].startswith("_"): # Python DSL removes the underscore prefix from all the # properties of the hit, so we do the same arg["name"] = arg["name"][1:] self.add_attribute( k, arg, for_types_py=for_types_py, for_response=for_response ) if "inherits" not in type_ or "type" not in type_["inherits"]: break if "generics" in type_["inherits"]: # Generics are only supported for certain specific cases at this # time. Here we just save them so that they can be recalled later # while traversing over to parent classes to find inherited # attributes.
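# (e.g. a parent declared as MultiBucketAggregateBase with a bucket generic # has its concrete argument saved here for the "buckets" handling above)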
for generic_type in type_["inherits"]["generics"]: generics.append(generic_type) type_ = self.find_type( type_["inherits"]["type"]["name"], type_["inherits"]["type"]["namespace"], ) return k def generate_field_py(schema, filename): """Generate field.py with all the Elasticsearch fields as Python classes.""" float_fields = ["half_float", "scaled_float", "double", "rank_feature"] integer_fields = ["byte", "short", "long"] range_fields = [ "integer_range", "float_range", "long_range", "double_range", "date_range", ] object_fields = ["nested"] coerced_fields = [ "boolean", "date", "float", "object", "dense_vector", "integer", "ip", "binary", "percolator", ] classes = [] property = schema.find_type("Property", "_types.mapping") for type_ in property["type"]["items"]: if type_["type"]["name"] == "DynamicProperty": # no support for dynamic properties continue field = schema.find_type(type_["type"]["name"], type_["type"]["namespace"]) name = class_name = "" for prop in field["properties"]: if prop["name"] == "type": if prop["type"]["kind"] != "literal_value": raise RuntimeError(f"Unexpected property type {prop}") name = prop["type"]["value"] class_name = "".join([n.title() for n in name.split("_")]) k = schema.interface_to_python_class( type_["type"]["name"], type_["type"]["namespace"], for_types_py=False, for_response=False, ) k["name"] = class_name k["field"] = name k["coerced"] = name in coerced_fields if name in float_fields: k["parent"] = "Float" elif name in integer_fields: k["parent"] = "Integer" elif name in range_fields: k["parent"] = "RangeField" elif name in object_fields: k["parent"] = "Object" else: k["parent"] = "Field" k["args"] = [prop for prop in k["args"] if prop["name"] != "type"] if name == "object": # the DSL's object field has a doc_class argument k["args"] = [ { "name": "doc_class", "type": 'Union[Type["InnerDoc"], "DefaultType"]', "doc": [ ":arg doc_class: base doc class that handles mapping.", " If no `doc_class` is provided, new instance of `InnerDoc` will be created,", " populated with `properties` and used. Can not be provided together with `properties`", ], "positional": True, "required": False, } ] + k["args"] elif name == "date": k["args"] = [ { "name": "default_timezone", "type": 'Union[str, "tzinfo", "DefaultType"]', "doc": [ ":arg default_timezone: timezone that will be automatically used for tz-naive values", " May be instance of `datetime.tzinfo` or string containing TZ offset", ], "positional": True, "required": False, } ] + k["args"] classes.append(k) # make sure parent classes appear first classes = sorted( classes, key=lambda k: ( f'AA{k["name"]}' if k["name"] in ["Float", "Integer", "Object"] else k["name"] ), ) with open(filename, "w") as f: f.write(field_py.render(classes=classes)) print(f"Generated {filename}.") def generate_query_py(schema, filename): """Generate query.py with all the properties of `QueryContainer` as Python classes. """ classes = [] query_container = schema.find_type("QueryContainer", "_types.query_dsl") for p in query_container["properties"]: classes += schema.property_to_python_class(p) with open(filename, "w") as f: f.write(query_py.render(classes=classes, parent="Query")) print(f"Generated {filename}.") def generate_aggs_py(schema, filename): """Generate aggs.py with all the properties of `AggregationContainer` as Python classes. 
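Properties flagged in the schema as container properties are skipped.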
""" classes = [] aggs_container = schema.find_type("AggregationContainer", "_types.aggregations") for p in aggs_container["properties"]: if "containerProperty" not in p or not p["containerProperty"]: classes += schema.property_to_python_class(p) with open(filename, "w") as f: f.write(aggs_py.render(classes=classes, parent="Agg")) print(f"Generated {filename}.") def generate_response_init_py(schema, filename): """Generate response/__init__.py with all the response properties documented and typed. """ search_response = schema.interface_to_python_class( "ResponseBody", "_global.search", for_types_py=False, for_response=True, ) ubq_response = schema.interface_to_python_class( "Response", "_global.update_by_query", for_types_py=False, for_response=True, ) with open(filename, "w") as f: f.write( response_init_py.render(response=search_response, ubq_response=ubq_response) ) print(f"Generated {filename}.") def generate_types_py(schema, filename): """Generate types.py""" classes = {} for interface in schema.interfaces: if interface == "PipeSeparatedFlags": continue # handled as a special case for_response = interface in schema.response_interfaces k = schema.interface_to_python_class( interface, for_types_py=True, for_response=for_response ) classes[k["name"]] = k # sort classes by being request/response and then by name sorted_classes = sorted( list(classes.keys()), key=lambda i: str(int(i in schema.response_interfaces)) + i, ) classes_list = [] for n in sorted_classes: k = classes[n] if k in classes_list: continue classes_list.append(k) with open(filename, "w") as f: f.write(types_py.render(classes=classes_list)) print(f"Generated {filename}.") if __name__ == "__main__": v = subprocess.check_output(["git", "branch", "--show-current"]).strip().decode() schema = ElasticsearchSchema(v) generate_field_py(schema, "elasticsearch/dsl/field.py") generate_query_py(schema, "elasticsearch/dsl/query.py") generate_aggs_py(schema, "elasticsearch/dsl/aggs.py") generate_response_init_py(schema, "elasticsearch/dsl/response/__init__.py") generate_types_py(schema, "elasticsearch/dsl/types.py") python-elasticsearch-9.1.1/utils/generate-docs-examples/000077500000000000000000000000001506101734100233455ustar00rootroot00000000000000python-elasticsearch-9.1.1/utils/generate-docs-examples/README.md000066400000000000000000000005141506101734100246240ustar00rootroot00000000000000This directory contains a script that generates the Python documentation examples in `docs/examples`. To use this script you need a recent version of Node.js (18+). First install the dependencies: ```bash cd utils/generate-docs-examples npm install ``` Then run the script as follows: ```bash node generate-docs-examples.js ``` python-elasticsearch-9.1.1/utils/generate-docs-examples/generate-docs-examples.js000066400000000000000000000063601506101734100302440ustar00rootroot00000000000000/* * Licensed to Elasticsearch B.V. under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch B.V. licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ const { join } = require('path') const { writeFile } = require('fs/promises') const fetch = require('node-fetch') const rimraf = require('rimraf') const ora = require('ora') const { convertRequests } = require('@elastic/request-converter') const minimist = require('minimist') const docsExamplesDir = join(__dirname, '../../docs', 'examples') const log = ora('Generating example snippets') const failures = {} async function getAlternativesReport (version = 'master') { const reportUrl = `https://raw.githubusercontent.com/elastic/built-docs/master/raw/en/elasticsearch/reference/${version}/alternatives_report.json` const response = await fetch(reportUrl) if (!response.ok) { log.fail(`unexpected response ${response.statusText}`) process.exit(1) } return await response.json() } async function makeSnippet (example) { const { source, digest } = example const fileName = `${digest}.asciidoc` const filePath = join(docsExamplesDir, fileName) try { const code = await convertRequests(source, 'python', { complete: false, printResponse: true }) await writeFile(filePath, asciidocWrapper(code, example), 'utf8') } catch (err) { failures[digest] = err.message } } async function generate (version) { log.start() rimraf.sync(join(docsExamplesDir, '*')) log.text = `Downloading alternatives report for version ${version}` const examples = await getAlternativesReport(version) let counter = 1 for (const example of examples) { log.text = `${counter++}/${examples.length}: ${example.digest}` // skip over bad request definitions if (example.source.startsWith('{') || example.source.endsWith('...')) { failures[example.digest] = 'Incomplete request syntax' continue } await makeSnippet(example) } } function asciidocWrapper (source, example) { return `// This file is autogenerated, DO NOT EDIT // ${example.source_location.file}:${example.source_location.line} [source, python] ---- ${source.trim()} ---- ` } const options = minimist(process.argv.slice(2), { string: ['version'], default: { version: 'master' } }) generate(options.version) .then(() => log.succeed('done!')) .catch(err => log.fail(err.message)) .finally(() => { const keys = Object.keys(failures) if (keys.length > 0) { let message = 'Some examples failed to generate:\n\n' for (const key of keys) { message += `${key}: ${failures[key]}\n` } console.error(message) } }) python-elasticsearch-9.1.1/utils/generate-docs-examples/package-lock.json000066400000000000000000000567571506101734100266050ustar00rootroot00000000000000{ "name": "generate-docs-examples", "version": "1.0.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "generate-docs-examples", "version": "1.0.0", "license": "Apache-2.0", "dependencies": { "@elastic/request-converter": "^8.15.2", "minimist": "^1.2.6", "node-fetch": "^2.6.7", "ora": "^5.4.1", "rimraf": "^3.0.2" } }, "node_modules/@elastic/request-converter": { "version": "8.18.0", "resolved": "https://registry.npmjs.org/@elastic/request-converter/-/request-converter-8.18.0.tgz", "integrity": "sha512-xEIB17voGulAfBThFqqtk8Osc+dNHiCqN9GW0Nf6PunNdvmAT5YvMb6u4NNI+NPAxNu90ak396g+ThjH9VRGIw==", "license": "Apache-2.0", "dependencies": { "base64url": "^3.0.1", "commander": "^12.1.0", "find-my-way-ts": "^0.1.2", "handlebars": "^4.7.8", "prettier": "^2.8.8" }, "bin": { "es-request-converter": "dist/es-request-converter.js" } }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": 
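// Each converted example is written to docs/examples/<digest>.asciidoc; any // conversion failures are collected in `failures` and reported after the run // completes (see the .finally handler below).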
"https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/ansi-styles": { "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "license": "MIT", "dependencies": { "color-convert": "^2.0.1" }, "engines": { "node": ">=8" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "license": "MIT" }, "node_modules/base64-js": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT" }, "node_modules/base64url": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/base64url/-/base64url-3.0.1.tgz", "integrity": "sha512-ir1UPr3dkwexU7FdV8qBBbNDRUhMmIekYMFZfi+C/sLNnRESKPl23nB9b2pltqfOQNnGzsDdId90AEtG5tCx4A==", "license": "MIT", "engines": { "node": ">=6.0.0" } }, "node_modules/bl": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", "license": "MIT", "dependencies": { "buffer": "^5.5.0", "inherits": "^2.0.4", "readable-stream": "^3.4.0" } }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "license": "MIT", "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" } }, "node_modules/buffer": { "version": "5.7.1", "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT", "dependencies": { "base64-js": "^1.3.1", "ieee754": "^1.1.13" } }, "node_modules/chalk": { "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "license": "MIT", "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/chalk/chalk?sponsor=1" } }, "node_modules/cli-cursor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz", "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", "license": "MIT", "dependencies": { "restore-cursor": "^3.1.0" 
}, "engines": { "node": ">=8" } }, "node_modules/cli-spinners": { "version": "2.9.2", "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", "license": "MIT", "engines": { "node": ">=6" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/clone": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/clone/-/clone-1.0.4.tgz", "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", "license": "MIT", "engines": { "node": ">=0.8" } }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", "license": "MIT", "dependencies": { "color-name": "~1.1.4" }, "engines": { "node": ">=7.0.0" } }, "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "license": "MIT" }, "node_modules/commander": { "version": "12.1.0", "resolved": "https://registry.npmjs.org/commander/-/commander-12.1.0.tgz", "integrity": "sha512-Vw8qHK3bZM9y/P10u3Vib8o/DdkvA2OtPtZvD871QKjy74Wj1WSKFILMPRPSdUSx5RFK1arlJzEtA4PkFgnbuA==", "license": "MIT", "engines": { "node": ">=18" } }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", "license": "MIT" }, "node_modules/defaults": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/defaults/-/defaults-1.0.4.tgz", "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", "license": "MIT", "dependencies": { "clone": "^1.0.2" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/find-my-way-ts": { "version": "0.1.5", "resolved": "https://registry.npmjs.org/find-my-way-ts/-/find-my-way-ts-0.1.5.tgz", "integrity": "sha512-4GOTMrpGQVzsCH2ruUn2vmwzV/02zF4q+ybhCIrw/Rkt3L8KWcycdC6aJMctJzwN4fXD4SD5F/4B9Sksh5rE0A==", "license": "MIT" }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", "license": "ISC" }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", "deprecated": "Glob versions prior to v9 are no longer supported", "license": "ISC", "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^3.1.1", "once": "^1.3.0", "path-is-absolute": "^1.0.0" }, "engines": { "node": "*" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/handlebars": { "version": "4.7.8", "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.8.tgz", "integrity": "sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==", "license": "MIT", "dependencies": { "minimist": "^1.2.5", "neo-async": 
"^2.6.2", "source-map": "^0.6.1", "wordwrap": "^1.0.0" }, "bin": { "handlebars": "bin/handlebars" }, "engines": { "node": ">=0.4.7" }, "optionalDependencies": { "uglify-js": "^3.1.4" } }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/ieee754": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "BSD-3-Clause" }, "node_modules/inflight": { "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", "license": "ISC", "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "license": "ISC" }, "node_modules/is-interactive": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/is-interactive/-/is-interactive-1.0.0.tgz", "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", "license": "MIT", "engines": { "node": ">=8" } }, "node_modules/is-unicode-supported": { "version": "0.1.0", "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", "license": "MIT", "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/log-symbols": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", "license": "MIT", "dependencies": { "chalk": "^4.1.0", "is-unicode-supported": "^0.1.0" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/mimic-fn": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", "license": "MIT", "engines": { "node": ">=6" } }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "license": "ISC", "dependencies": { "brace-expansion": "^1.1.7" }, "engines": { "node": "*" } }, "node_modules/minimist": { "version": "1.2.8", "resolved": 
"https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", "license": "MIT", "funding": { "url": "https://github.com/sponsors/ljharb" } }, "node_modules/neo-async": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz", "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", "license": "MIT" }, "node_modules/node-fetch": { "version": "2.7.0", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", "license": "MIT", "dependencies": { "whatwg-url": "^5.0.0" }, "engines": { "node": "4.x || >=6.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "peerDependenciesMeta": { "encoding": { "optional": true } } }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", "license": "ISC", "dependencies": { "wrappy": "1" } }, "node_modules/onetime": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", "license": "MIT", "dependencies": { "mimic-fn": "^2.1.0" }, "engines": { "node": ">=6" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/ora": { "version": "5.4.1", "resolved": "https://registry.npmjs.org/ora/-/ora-5.4.1.tgz", "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", "license": "MIT", "dependencies": { "bl": "^4.1.0", "chalk": "^4.1.0", "cli-cursor": "^3.1.0", "cli-spinners": "^2.5.0", "is-interactive": "^1.0.0", "is-unicode-supported": "^0.1.0", "log-symbols": "^4.1.0", "strip-ansi": "^6.0.0", "wcwidth": "^1.0.1" }, "engines": { "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/path-is-absolute": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", "license": "MIT", "engines": { "node": ">=0.10.0" } }, "node_modules/prettier": { "version": "2.8.8", "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.8.8.tgz", "integrity": "sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==", "license": "MIT", "bin": { "prettier": "bin-prettier.js" }, "engines": { "node": ">=10.13.0" }, "funding": { "url": "https://github.com/prettier/prettier?sponsor=1" } }, "node_modules/readable-stream": { "version": "3.6.2", "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", "license": "MIT", "dependencies": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", "util-deprecate": "^1.0.1" }, "engines": { "node": ">= 6" } }, "node_modules/restore-cursor": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz", "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", 
"license": "MIT", "dependencies": { "onetime": "^5.1.0", "signal-exit": "^3.0.2" }, "engines": { "node": ">=8" } }, "node_modules/rimraf": { "version": "3.0.2", "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", "deprecated": "Rimraf versions prior to v4 are no longer supported", "license": "ISC", "dependencies": { "glob": "^7.1.3" }, "bin": { "rimraf": "bin.js" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", "funding": [ { "type": "github", "url": "https://github.com/sponsors/feross" }, { "type": "patreon", "url": "https://www.patreon.com/feross" }, { "type": "consulting", "url": "https://feross.org/support" } ], "license": "MIT" }, "node_modules/signal-exit": { "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "license": "ISC" }, "node_modules/source-map": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", "license": "BSD-3-Clause", "engines": { "node": ">=0.10.0" } }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", "license": "MIT", "dependencies": { "safe-buffer": "~5.2.0" } }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", "license": "MIT", "dependencies": { "ansi-regex": "^5.0.1" }, "engines": { "node": ">=8" } }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "license": "MIT", "dependencies": { "has-flag": "^4.0.0" }, "engines": { "node": ">=8" } }, "node_modules/tr46": { "version": "0.0.3", "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", "license": "MIT" }, "node_modules/uglify-js": { "version": "3.19.3", "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.19.3.tgz", "integrity": "sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==", "license": "BSD-2-Clause", "optional": true, "bin": { "uglifyjs": "bin/uglifyjs" }, "engines": { "node": ">=0.8.0" } }, "node_modules/util-deprecate": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", "license": "MIT" }, "node_modules/wcwidth": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/wcwidth/-/wcwidth-1.0.1.tgz", 
"integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", "license": "MIT", "dependencies": { "defaults": "^1.0.3" } }, "node_modules/webidl-conversions": { "version": "3.0.1", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", "license": "BSD-2-Clause" }, "node_modules/whatwg-url": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", "license": "MIT", "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "node_modules/wordwrap": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==", "license": "MIT" }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", "license": "ISC" } } } python-elasticsearch-9.1.1/utils/generate-docs-examples/package.json000066400000000000000000000006541506101734100256400ustar00rootroot00000000000000{ "name": "generate-docs-examples", "version": "1.0.0", "main": "generate-docs-examples.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "keywords": [], "author": "", "license": "Apache-2.0", "description": "", "dependencies": { "@elastic/request-converter": "^8.15.2", "minimist": "^1.2.6", "node-fetch": "^2.6.7", "ora": "^5.4.1", "rimraf": "^3.0.2" } } python-elasticsearch-9.1.1/utils/generate-examples.py000066400000000000000000000236521506101734100230010ustar00rootroot00000000000000#!/usr/bin/env python # Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import collections import json import os import tempfile from pathlib import Path import black from click.testing import CliRunner from jinja2 import Environment, FileSystemLoader code_root = Path(__file__).absolute().parent.parent asciidocs_dir = code_root / "docs/examples" flight_recorder_dir = code_root.parent / "clients-flight-recorder" report_path = flight_recorder_dir / "recordings/docs/parsed-alternative-report.json" substitutions = {"type": "doc_type", "from": "from_"} jinja_env = Environment( loader=FileSystemLoader([code_root / "utils" / "templates"]), trim_blocks=True, lstrip_blocks=True, ) files_to_generate = [ "search/request-body.asciidoc", "mapping.asciidoc", "query-dsl.asciidoc", "query-dsl/query-string-query.asciidoc", "getting-started.asciidoc", "query-dsl/query_filter_context.asciidoc", "query-dsl/bool-query.asciidoc", "query-dsl/match-query.asciidoc", "indices/create-index.asciidoc", "docs/index_.asciidoc", "aggregations/bucket/terms-aggregation.asciidoc", "query-dsl/range-query.asciidoc", "search/search.asciidoc", "query-dsl/multi-match-query.asciidoc", "docs/bulk.asciidoc", "indices/delete-index.asciidoc", "indices/put-mapping.asciidoc", "docs/reindex.asciidoc", "query-dsl/term-query.asciidoc", "indices/templates.asciidoc", "getting-started.asciidoc", "docs/update.asciidoc", "query-dsl/match-all-query.asciidoc", "docs/get.asciidoc", "query-dsl/wildcard-query.asciidoc", "query-dsl/exists-query.asciidoc", "docs/delete-by-query.asciidoc", "mapping/params/format.asciidoc", "mapping/types/nested.asciidoc", "query-dsl/terms-query.asciidoc", "search/request/sort.asciidoc", "mapping/types/date.asciidoc", "indices/update-settings.asciidoc", "indices/aliases.asciidoc", "setup/install/check-running.asciidoc", "query-dsl/regexp-query.asciidoc", "query-dsl/function-score-query.asciidoc", "search/request/from-size.asciidoc", "cluster/health.asciidoc", "query-dsl/nested-query.asciidoc", "mapping/types/array.asciidoc", "mapping/params/fielddata.asciidoc", "search/count.asciidoc", "mapping/types/keyword.asciidoc", "docs/update-by-query.asciidoc", "search/suggesters.asciidoc", "api-conventions.asciidoc", "cat/indices.asciidoc", "query-dsl/match-phrase-query.asciidoc", "indices/get-index.asciidoc", "setup/logging-config.asciidoc", "docs/delete.asciidoc", "aggregations/metrics/valuecount-aggregation.asciidoc", "indices/get-mapping.asciidoc", "aggregations/bucket/filter-aggregation.asciidoc", "aggregations/bucket/datehistogram-aggregation.asciidoc", "mapping/types/numeric.asciidoc", "search/request/scroll.asciidoc", "mapping/fields/id-field.asciidoc", "search.asciidoc", "mapping/params/multi-fields.asciidoc", "cluster/allocation-explain.asciidoc", "cluster/get-settings.asciidoc", "cluster/update-settings.asciidoc", "health/health.asciidoc", "cluster/reroute.asciidoc", "inference/get-inference.asciidoc", "inference/delete-inference.asciidoc", "inference/post-inference.asciidoc", "inference/put-inference.asciidoc", "ml/trained-models/apis/clear-trained-model-deployment-cache.asciidoc", "ml/trained-models/apis/delete-trained-models-aliases.asciidoc", "ml/trained-models/apis/delete-trained-models.asciidoc", "ml/trained-models/apis/get-trained-models-stats.asciidoc", "ml/trained-models/apis/get-trained-models.asciidoc", "ml/trained-models/apis/infer-trained-model-deployment.asciidoc", "ml/trained-models/apis/infer-trained-model.asciidoc", "ml/trained-models/apis/put-trained-model-definition-part.asciidoc", "ml/trained-models/apis/put-trained-model-vocabulary.asciidoc", 
"ml/trained-models/apis/put-trained-models-aliases.asciidoc", "ml/trained-models/apis/put-trained-models.asciidoc", "ml/trained-models/apis/start-trained-model-deployment.asciidoc", "ml/trained-models/apis/stop-trained-model-deployment.asciidoc", "ml/trained-models/apis/update-trained-model-deployment.asciidoc", "setup/run-elasticsearch-locally.asciidoc", "setup/important-settings.asciidoc", "setup/secure-settings.asciidoc", "modules/cluster.asciidoc", "modules/cluster/misc.asciidoc", "modules/network.asciidoc", "modules/indices/request_cache.asciidoc", "setup/advanced-configuration.asciidoc", "setup/sysconfig/swap.asciidoc", "setup/sysconfig/file-descriptors.asciidoc", "modules/discovery/voting.asciidoc", "setup/add-nodes.asciidoc", "setup/restart-cluster.asciidoc", "modules/cluster/remote-clusters-api-key.asciidoc", "modules/cluster/remote-clusters-cert.asciidoc", "modules/discovery/voting.asciidoc", "modules/cluster/remote-clusters-migration.asciidoc", "modules/cluster/remote-clusters-troubleshooting.asciidoc", "upgrade/archived-settings.asciidoc", "index-modules/allocation/filtering.asciidoc", "index-modules/allocation/delayed.asciidoc", "index-modules/allocation/prioritization.asciidoc", "index-modules/allocation/total_shards.asciidoc", "index-modules/allocation/data_tier_allocation.asciidoc", "index-modules/blocks.asciidoc", "index-modules/similarity.asciidoc", "index-modules/slowlog.asciidoc", "index-modules/store.asciidoc", "index-modules/index-sorting.asciidoc", "index-modules/indexing-pressure.asciidoc", "mapping/dynamic-mapping.asciidoc", "mapping/dynamic/field-mapping.asciidoc", "mapping/dynamic/templates.asciidoc", "mapping/explicit-mapping.asciidoc", "mapping/runtime.asciidoc", "mapping/runtime.asciidoc", "mapping/types.asciidoc", "mapping/types/aggregate-metric-double.asciidoc", "mapping/types/alias.asciidoc", "mapping/types/array.asciidoc", "mapping/types/binary.asciidoc", "mapping/types/boolean.asciidoc", "mapping/types/completion.asciidoc", "mapping/types/date_nanos.asciidoc", "mapping/types/dense-vector.asciidoc", "mapping/types/flattened.asciidoc", "mapping/types/geo-point.asciidoc", "mapping/types/geo-shape.asciidoc", "mapping/types/histogram.asciidoc", "mapping/types/ip.asciidoc", "mapping/types/parent-join.asciidoc", "mapping/types/object.asciidoc", "mapping/types/percolator.asciidoc", "mapping/types/point.asciidoc", "mapping/types/range.asciidoc", "mapping/types/rank-feature.asciidoc", "mapping/types/rank-features.asciidoc", "mapping/types/search-as-you-type.asciidoc", "mapping/types/semantic-text.asciidoc", "mapping/types/shape.asciidoc", "mapping/types/sparse-vector.asciidoc", "mapping/types/text.asciidoc", "mapping/types/token-count.asciidoc", "mapping/types/unsigned_long.asciidoc", "mapping/types/version.asciidoc", ] ParsedSource = collections.namedtuple("ParsedSource", ["api", "params", "body"]) def blacken(filename): runner = CliRunner() result = runner.invoke( black.main, [str(filename), "--line-length=75", "--target-version=py37"] ) assert result.exit_code == 0, result.output def main(): for filepath in asciidocs_dir.iterdir(): if filepath.name.endswith(".asciidoc"): filepath.unlink() if not flight_recorder_dir.exists() or not report_path.exists(): raise RuntimeError( f"clients-flight-recorder repository not checked out at {flight_recorder_dir}" ) with report_path.open() as f: report = json.loads(f.read()) t = jinja_env.get_template("example") for exm in report: if exm["lang"] != "console": continue if exm["source_location"]["file"] not in files_to_generate: 
continue parsed_sources = [] for src in exm["parsed_source"]: params = (src.get("params") or {}).copy() params.update(src.get("query") or {}) params = { k: (list(v.split(",")) if isinstance(v, str) and "," in v else v) for k, v in params.items() } parsed_sources.append( ParsedSource( api=src["api"], params={ substitutions.get(k, k): repr(v) for k, v in params.items() }, body=src.get("body", None) or None, ) ) with tempfile.NamedTemporaryFile("w+", delete=False) as tmp_file: tmp_file.write(t.render(parsed_sources=parsed_sources)) try: blacken(tmp_file.name) except AssertionError: loc = exm["source_location"] print(f"Failed to format {loc['file']}:{loc['line']}, skipping.") continue with open(tmp_file.name) as f: data = f.read() data = data.rstrip().replace(",)", ")") os.unlink(tmp_file.name) with (asciidocs_dir / f"{exm['digest']}.asciidoc").open(mode="w") as f: f.truncate() f.write( f"""// {exm['source_location']['file']}:{exm['source_location']['line']} [source, python] ---- {data} ----""" ) if __name__ == "__main__": main() python-elasticsearch-9.1.1/utils/license-headers.py000066400000000000000000000104701506101734100224200ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Script which verifies that all source files have a license header. Has two modes: 'fix' and 'check'. 'fix' fixes problems, 'check' will error out if 'fix' would have changed the file. """ import os import re import sys from itertools import chain from typing import Iterator, List lines_to_keep = ["# -*- coding: utf-8 -*-\n", "#!/usr/bin/env python\n"] license_header_lines = [ "# Licensed to Elasticsearch B.V. under one or more contributor\n", "# license agreements. See the NOTICE file distributed with\n", "# this work for additional information regarding copyright\n", "# ownership. Elasticsearch B.V. licenses this file to you under\n", '# the Apache License, Version 2.0 (the "License"); you may\n', "# not use this file except in compliance with the License.\n", "# You may obtain a copy of the License at\n", "#\n", "# http://www.apache.org/licenses/LICENSE-2.0\n", "#\n", "# Unless required by applicable law or agreed to in writing,\n", "# software distributed under the License is distributed on an\n", '# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n', "# KIND, either express or implied. See the License for the\n", "# specific language governing permissions and limitations\n", "# under the License.\n", "\n", ] def find_files_to_fix(sources: List[str]) -> Iterator[str]: """Iterates over all files and dirs in 'sources' and returns only the filepaths that need fixing. 
""" for source in sources: if os.path.isfile(source) and does_file_need_fix(source): yield source elif os.path.isdir(source): for root, _, filenames in os.walk(source): for filename in filenames: filepath = os.path.join(root, filename) if does_file_need_fix(filepath): yield filepath def does_file_need_fix(filepath: str) -> bool: if not re.search(r"\.pyi?$", filepath): return False with open(filepath) as f: first_license_line = None for line in f: if line == license_header_lines[0]: first_license_line = line break elif line not in lines_to_keep: return True for header_line, line in zip( license_header_lines, chain((first_license_line,), f) ): if line != header_line: return True return False def add_header_to_file(filepath: str) -> None: with open(filepath) as f: lines = list(f) i = 0 for i, line in enumerate(lines): if line not in lines_to_keep: break lines = lines[:i] + license_header_lines + lines[i:] with open(filepath, mode="w") as f: f.truncate() f.write("".join(lines)) print(f"Fixed {os.path.relpath(filepath, os.getcwd())}") def main(): mode = sys.argv[1] assert mode in ("fix", "check") sources = [os.path.abspath(x) for x in sys.argv[2:]] files_to_fix = find_files_to_fix(sources) if mode == "fix": for filepath in files_to_fix: add_header_to_file(filepath) else: no_license_headers = list(files_to_fix) if no_license_headers: print("No license header found in:") cwd = os.getcwd() [ print(f" - {os.path.relpath(filepath, cwd)}") for filepath in no_license_headers ] sys.exit(1) else: print("All files had license header") if __name__ == "__main__": main() python-elasticsearch-9.1.1/utils/run-unasync-dsl.py000066400000000000000000000126241506101734100224320ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import os import subprocess import sys from glob import glob from pathlib import Path import unasync def main(check=False): # the list of directories that need to be processed with unasync # each entry has two paths: # - the source path with the async sources # - the destination path where the sync sources should be written source_dirs = [ ( "elasticsearch/dsl/_async/", "elasticsearch/dsl/_sync/", ), ("test_elasticsearch/test_dsl/_async/", "test_elasticsearch/test_dsl/_sync/"), ( "test_elasticsearch/test_dsl/test_integration/_async/", "test_elasticsearch/test_dsl/test_integration/_sync/", ), ( "test_elasticsearch/test_dsl/test_integration/test_examples/_async/", "test_elasticsearch/test_dsl/test_integration/test_examples/_sync/", ), ("examples/dsl/async/", "examples/dsl/"), ] # Unasync all the generated async code additional_replacements = { "_async": "_sync", "AsyncElasticsearch": "Elasticsearch", "AsyncSearch": "Search", "AsyncMultiSearch": "MultiSearch", "AsyncEmptySearch": "EmptySearch", "AsyncDocument": "Document", "AsyncIndexMeta": "IndexMeta", "AsyncIndexTemplate": "IndexTemplate", "AsyncIndex": "Index", "AsyncComposableIndexTemplate": "ComposableIndexTemplate", "AsyncUpdateByQuery": "UpdateByQuery", "AsyncMapping": "Mapping", "AsyncFacetedSearch": "FacetedSearch", "AsyncUsingType": "UsingType", "async_connections": "connections", "async_scan": "scan", "async_simulate": "simulate", "async_bulk": "bulk", "async_mock_client": "mock_client", "async_client": "client", "async_data_client": "data_client", "async_write_client": "write_client", "async_pull_request": "pull_request", "async_examples": "examples", "async_sleep": "sleep", "assert_awaited_once_with": "assert_called_once_with", "pytest_asyncio": "pytest", "asynccontextmanager": "contextmanager", } rules = [ unasync.Rule( fromdir=dir[0], todir=f"{dir[0]}_sync_check/" if check else dir[1], additional_replacements=additional_replacements, ) for dir in source_dirs ] filepaths = [] for root, _, filenames in os.walk(Path(__file__).absolute().parent.parent): if "/site-packages" in root or "/." 
in root or "__pycache__" in root: continue for filename in filenames: if filename.rpartition(".")[-1] in ( "py", "pyi", ) and not filename.startswith("utils.py"): filepaths.append(os.path.join(root, filename)) unasync.unasync_files(filepaths, rules) output_dirs = [] for dir in source_dirs: output_dirs.append(f"{dir[0]}_sync_check/" if check else dir[1]) subprocess.check_call(["black", "--target-version=py38", *output_dirs]) subprocess.check_call(["isort", *output_dirs]) for dir, output_dir in zip(source_dirs, output_dirs): for file in glob("*.py", root_dir=dir[0]): # remove asyncio from sync files subprocess.check_call( ["sed", "-i.bak", "/^import asyncio$/d", f"{output_dir}{file}"] ) subprocess.check_call( [ "sed", "-i.bak", "s/asyncio\\.run(main())/main()/", f"{output_dir}{file}", ] ) subprocess.check_call( [ "sed", "-i.bak", 's/"elasticsearch\\[async\\]"/elasticsearch/', f"{output_dir}{file}", ] ) subprocess.check_call( [ "sed", "-i.bak", "s/pytest.mark.asyncio/pytest.mark.sync/", f"{output_dir}{file}", ] ) subprocess.check_call(["rm", f"{output_dir}{file}.bak"]) if check: # make sure there are no differences between _sync and _sync_check subprocess.check_call( [ "diff", f"{dir[1]}{file}", f"{output_dir}{file}", ] ) if check: subprocess.check_call(["rm", "-rf", output_dir]) if __name__ == "__main__": main(check="--check" in sys.argv) python-elasticsearch-9.1.1/utils/run-unasync.py000066400000000000000000000104211506101734100216430ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
import os import subprocess import sys from glob import glob from pathlib import Path import unasync def cleanup(source_dir: Path, output_dir: Path, patterns: list[str]): for file in glob("*.py", root_dir=source_dir): path = output_dir / file for pattern in patterns: subprocess.check_call(["sed", "-i.bak", pattern, str(path)]) subprocess.check_call(["rm", f"{path}.bak"]) def run( rule: unasync.Rule, cleanup_patterns: list[str] = [], check: bool = False, ): root_dir = Path(__file__).absolute().parent.parent source_dir = root_dir / rule.fromdir.lstrip("/") output_dir = check_dir = root_dir / rule.todir.lstrip("/") if check: rule.todir += "_sync_check/" output_dir = root_dir / rule.todir.lstrip("/") filepaths = [] for root, _, filenames in os.walk(source_dir): for filename in filenames: if filename.rpartition(".")[-1] in { "py", "pyi", } and not filename.startswith("utils.py"): filepaths.append(os.path.join(root, filename)) unasync.unasync_files(filepaths, [rule]) if cleanup_patterns: cleanup(source_dir, output_dir, cleanup_patterns) if check: subprocess.check_call(["black", output_dir]) subprocess.check_call(["isort", "--profile=black", output_dir]) # make sure there are no differences between _sync and _sync_check for file in glob("*.py", root_dir=output_dir): subprocess.check_call( [ "diff", f"{check_dir}/{file}", f"{output_dir}/{file}", ] ) subprocess.check_call(["rm", "-rf", output_dir]) def main(check: bool = False): run( rule=unasync.Rule( fromdir="/elasticsearch/_async/client/", todir="/elasticsearch/_sync/client/", additional_replacements={ # We want to rewrite to 'Transport' instead of 'SyncTransport', etc "AsyncTransport": "Transport", "AsyncElasticsearch": "Elasticsearch", # We don't want to rewrite this class "AsyncSearchClient": "AsyncSearchClient", # Handling typing.Awaitable[...] isn't done yet by unasync. "_TYPE_ASYNC_SNIFF_CALLBACK": "_TYPE_SYNC_SNIFF_CALLBACK", }, ), check=check, ) run( rule=unasync.Rule( fromdir="elasticsearch/helpers/vectorstore/_async/", todir="elasticsearch/helpers/vectorstore/_sync/", additional_replacements={ "AsyncBM25Strategy": "BM25Strategy", "AsyncDenseVectorStrategy": "DenseVectorStrategy", "AsyncDenseVectorScriptScoreStrategy": "DenseVectorScriptScoreStrategy", "AsyncElasticsearch": "Elasticsearch", "AsyncElasticsearchEmbeddings": "ElasticsearchEmbeddings", "AsyncEmbeddingService": "EmbeddingService", "AsyncRetrievalStrategy": "RetrievalStrategy", "AsyncSparseVectorStrategy": "SparseVectorStrategy", "AsyncTransport": "Transport", "AsyncVectorStore": "VectorStore", "async_bulk": "bulk", "_async": "_sync", }, ), cleanup_patterns=[ "/^import asyncio$/d", ], check=check, ) if __name__ == "__main__": main(check="--check" in sys.argv) python-elasticsearch-9.1.1/utils/templates/000077500000000000000000000000001506101734100210075ustar00rootroot00000000000000python-elasticsearch-9.1.1/utils/templates/aggs.py.tpl000066400000000000000000000235231506101734100231050ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import collections.abc from copy import deepcopy from typing import ( TYPE_CHECKING, Any, ClassVar, Dict, Generic, Iterable, Literal, Mapping, MutableMapping, Optional, Sequence, Union, cast, ) from elastic_transport.client_utils import DEFAULT from .query import Query from .response.aggs import AggResponse, BucketData, FieldBucketData, TopHitsData from .utils import _R, AttrDict, DslBase from . import wrappers if TYPE_CHECKING: from elastic_transport.client_utils import DefaultType from . import types from .document_base import InstrumentedField from .search_base import SearchBase def A( name_or_agg: Union[MutableMapping[str, Any], "Agg[_R]", str], filter: Optional[Union[str, "Query"]] = None, **params: Any, ) -> "Agg[_R]": if filter is not None: if name_or_agg != "filter": raise ValueError( "Aggregation %r doesn't accept positional argument 'filter'." % name_or_agg ) params["filter"] = filter # {"terms": {"field": "tags"}, "aggs": {...}} if isinstance(name_or_agg, collections.abc.MutableMapping): if params: raise ValueError("A() cannot accept parameters when passing in a dict.") # copy to avoid modifying in-place agg = deepcopy(name_or_agg) # pop out nested aggs aggs = agg.pop("aggs", None) # pop out meta data meta = agg.pop("meta", None) # should be {"terms": {"field": "tags"}} if len(agg) != 1: raise ValueError( 'A() can only accept dict with an aggregation ({"terms": {...}}). ' "Instead it got (%r)" % name_or_agg ) agg_type, params = agg.popitem() if aggs: params = params.copy() params["aggs"] = aggs if meta: params = params.copy() params["meta"] = meta return Agg[_R].get_dsl_class(agg_type)(_expand__to_dot=False, **params) # Terms(...) just return the nested agg elif isinstance(name_or_agg, Agg): if params: raise ValueError( "A() cannot accept parameters when passing in an Agg object." 
) return name_or_agg # "terms", field="tags" return Agg[_R].get_dsl_class(name_or_agg)(**params) class Agg(DslBase, Generic[_R]): _type_name = "agg" _type_shortcut = staticmethod(A) name = "" def __contains__(self, key: str) -> bool: return False def to_dict(self) -> Dict[str, Any]: d = super().to_dict() if isinstance(d[self.name], dict): n = cast(Dict[str, Any], d[self.name]) if "meta" in n: d["meta"] = n.pop("meta") return d def result(self, search: "SearchBase[_R]", data: Dict[str, Any]) -> AttrDict[Any]: return AggResponse[_R](self, search, data) class AggBase(Generic[_R]): aggs: Dict[str, Agg[_R]] _base: Agg[_R] _params: Dict[str, Any] _param_defs: ClassVar[Dict[str, Any]] = { "aggs": {"type": "agg", "hash": True}, } def __contains__(self, key: str) -> bool: return key in self._params.get("aggs", {}) def __getitem__(self, agg_name: str) -> Agg[_R]: agg = cast( Agg[_R], self._params.setdefault("aggs", {})[agg_name] ) # propagate KeyError # make sure we're not mutating a shared state - whenever accessing a # bucket, return a shallow copy of it to be safe if isinstance(agg, Bucket): agg = A(agg.name, **agg._params) # be sure to store the copy so any modifications to it will affect us self._params["aggs"][agg_name] = agg return agg def __setitem__(self, agg_name: str, agg: Agg[_R]) -> None: self.aggs[agg_name] = A(agg) def __iter__(self) -> Iterable[str]: return iter(self.aggs) def _agg( self, bucket: bool, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> Agg[_R]: agg = self[name] = A(agg_type, *args, **params) # For chaining - when creating new buckets return them... if bucket: return agg # otherwise return self._base so we can keep chaining else: return self._base def metric( self, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> Agg[_R]: return self._agg(False, name, agg_type, *args, **params) def bucket( self, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> "Bucket[_R]": return cast("Bucket[_R]", self._agg(True, name, agg_type, *args, **params)) def pipeline( self, name: str, agg_type: Union[Dict[str, Any], Agg[_R], str], *args: Any, **params: Any, ) -> "Pipeline[_R]": return cast("Pipeline[_R]", self._agg(False, name, agg_type, *args, **params)) def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return BucketData(self, search, data) # type: ignore[arg-type] class Bucket(AggBase[_R], Agg[_R]): def __init__(self, **params: Any): super().__init__(**params) # remember self for chaining self._base = self def to_dict(self) -> Dict[str, Any]: d = super(AggBase, self).to_dict() if isinstance(d[self.name], dict): n = cast(AttrDict[Any], d[self.name]) if "aggs" in n: d["aggs"] = n.pop("aggs") return d class Pipeline(Agg[_R]): pass {% for k in classes %} class {{ k.name }}({{ k.parent if k.parent else parent }}[_R]): """ {% for line in k.docstring %} {{ line }} {% endfor %} {% if k.args %} {% if k.docstring %} {% endif %} {% for kwarg in k.args %} {% for line in kwarg.doc %} {{ line }} {% endfor %} {% endfor %} {% endif %} """ {% if k.property_name %} name = "{{ k.property_name }}" {% endif %} {% if k.params %} _param_defs = { {% for param in k.params %} "{{ param.name }}": {{ param.param }}, {% endfor %} {% if k.name == "Filter" or k.name == "Filters" or k.name == "Composite" %} {# Some #} "aggs": {"type": "agg", "hash": True}, {% endif %} } {% endif %} def __init__( self, {% if k.args | length != 1 %} {% for arg in k.args %} {% if arg.positional %} 
{{ arg.name }}: {{ arg.type }} = DEFAULT, {% endif %} {% endfor %} {% if k.args and not k.args[-1].positional %} *, {% endif %} {% for arg in k.args %} {% if not arg.positional %} {{ arg.name }}: {{ arg.type }} = DEFAULT, {% endif %} {% endfor %} {% else %} {# when we have just one argument, we allow it as positional or keyword #} {% for arg in k.args %} {{ arg.name }}: {{ arg.type }} = DEFAULT, {% endfor %} {% endif %} **kwargs: Any ): {% if k.name == "FunctionScore" %} {# continuation of the FunctionScore shortcut property support from above #} if functions is DEFAULT: functions = [] for name in ScoreFunction._classes: if name in kwargs: functions.append({name: kwargs.pop(name)}) # type: ignore {% elif k.is_single_field %} if _field is not DEFAULT: kwargs[str(_field)] = _value {% elif k.is_multi_field %} if _fields is not DEFAULT: for field, value in _fields.items(): kwargs[str(field)] = value {% endif %} super().__init__( {% for arg in k.args %} {% if not arg.positional %} {{ arg.name }}={{ arg.name }}, {% endif %} {% endfor %} **kwargs ) {# what follows is a set of Pythonic enhancements to some of the query classes which are outside the scope of the code generator #} {% if k.name == "Filter" %} def to_dict(self) -> Dict[str, Any]: d = super().to_dict() if isinstance(d[self.name], dict): n = cast(AttrDict[Any], d[self.name]) n.update(n.pop("filter", {})) return d {% elif k.name == "Histogram" or k.name == "DateHistogram" or k.name == "AutoDateHistogram" or k.name == "VariableWidthHistogram" %} def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return FieldBucketData(self, search, data) {% elif k.name == "Terms" %} def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return FieldBucketData(self, search, data) {% elif k.name == "TopHits" %} def result(self, search: "SearchBase[_R]", data: Any) -> AttrDict[Any]: return TopHitsData(self, search, data) {% endif %} {% endfor %} python-elasticsearch-9.1.1/utils/templates/example000066400000000000000000000003361506101734100223670ustar00rootroot00000000000000{% for src in parsed_sources %} resp = client.{{ src.api }}( {% for key, val in src.params.items() %} {{ key }}={{ val }}, {% endfor %} {% if src.body %} body={{ src.body }}, {% endif %} ) print(resp) {% endfor %}python-elasticsearch-9.1.1/utils/templates/field.py.tpl000066400000000000000000000363631506101734100232550ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
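{# Jinja2 template for the generated DSL field module. The hand-written
   scaffolding that follows (construct_field, Field, CustomField, RangeField)
   is emitted verbatim; the ``{% raw %}{% for k in classes %}{% endraw %}``
   loop then renders one Field subclass per mapping field type, splicing in
   per-type special cases (object, date, boolean, ip, binary, ...) by field
   name. A rendered class looks roughly like this (illustrative only):

       class Boolean(Field):
           name = "boolean"

           def __init__(self, *args, **kwargs):
               super().__init__(*args, **kwargs)

           def _deserialize(self, data):
               if data == "false":
                   return False
               return bool(data)
#}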
import base64 import collections.abc import ipaddress from copy import deepcopy from datetime import date, datetime from typing import ( TYPE_CHECKING, Any, Dict, Iterable, Iterator, Literal, Mapping, Optional, Sequence, Tuple, Type, Union, cast, ) from dateutil import parser, tz from elastic_transport.client_utils import DEFAULT, DefaultType from .exceptions import ValidationException from .query import Q from .utils import AttrDict, AttrList, DslBase from .wrappers import Range if TYPE_CHECKING: from datetime import tzinfo from ipaddress import IPv4Address, IPv6Address from _operator import _SupportsComparison from .document import InnerDoc from .document_base import InstrumentedField from .mapping_base import MappingBase from .query import Query from . import types unicode = str def construct_field( name_or_field: Union[ str, "Field", Dict[str, Any], ], **params: Any, ) -> "Field": # {"type": "text", "analyzer": "snowball"} if isinstance(name_or_field, collections.abc.Mapping): if params: raise ValueError( "construct_field() cannot accept parameters when passing in a dict." ) params = deepcopy(name_or_field) if "type" not in params: # inner object can be implicitly defined if "properties" in params: name = "object" else: raise ValueError('construct_field() needs to have a "type" key.') else: name = params.pop("type") return Field.get_dsl_class(name)(**params) # Text() if isinstance(name_or_field, Field): if params: raise ValueError( "construct_field() cannot accept parameters " "when passing in a construct_field object." ) return name_or_field # "text", analyzer="snowball" return Field.get_dsl_class(name_or_field)(**params) class Field(DslBase): _type_name = "field" _type_shortcut = staticmethod(construct_field) # all fields can be multifields _param_defs = {"fields": {"type": "field", "hash": True}} name = "" _coerce = False def __init__( self, multi: bool = False, required: bool = False, *args: Any, **kwargs: Any ): """ :arg bool multi: specifies whether field can contain array of values :arg bool required: specifies whether field is required """ self._multi = multi self._required = required super().__init__(*args, **kwargs) def __getitem__(self, subfield: str) -> "Field": return cast(Field, self._params.get("fields", {})[subfield]) def _serialize(self, data: Any, skip_empty: bool) -> Any: return data def _safe_serialize(self, data: Any, skip_empty: bool) -> Any: try: return self._serialize(data, skip_empty) except TypeError: # older method signature, without skip_empty return self._serialize(data) # type: ignore[call-arg] def _deserialize(self, data: Any) -> Any: return data def _empty(self) -> Optional[Any]: return None def empty(self) -> Optional[Any]: if self._multi: return AttrList([]) return self._empty() def serialize(self, data: Any, skip_empty: bool = True) -> Any: if isinstance(data, (list, AttrList, tuple)): return list(map(self._safe_serialize, cast(Iterable[Any], data), [skip_empty] * len(data))) return self._safe_serialize(data, skip_empty) def deserialize(self, data: Any) -> Any: if isinstance(data, (list, AttrList, tuple)): data = [ None if d is None else self._deserialize(d) for d in cast(Iterable[Any], data) ] return data if data is None: return None return self._deserialize(data) def clean(self, data: Any) -> Any: if data is not None: data = self.deserialize(data) if data in (None, [], {}) and self._required: raise ValidationException("Value required for this field.") return data def to_dict(self) -> Dict[str, Any]: d = super().to_dict() name, value = 
cast(Tuple[str, Dict[str, Any]], d.popitem()) value["type"] = name return value class CustomField(Field): name = "custom" _coerce = True def to_dict(self) -> Dict[str, Any]: if isinstance(self.builtin_type, Field): return self.builtin_type.to_dict() d = super().to_dict() d["type"] = self.builtin_type return d class RangeField(Field): _coerce = True _core_field: Optional[Field] = None def _deserialize(self, data: Any) -> Range["_SupportsComparison"]: if isinstance(data, Range): return data data = {k: self._core_field.deserialize(v) for k, v in data.items()} # type: ignore[union-attr] return Range(data) def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]: if data is None: return None if not isinstance(data, collections.abc.Mapping): data = data.to_dict() return {k: self._core_field.serialize(v) for k, v in data.items()} # type: ignore[union-attr] {% for k in classes %} class {{ k.name }}({{ k.parent }}): """ {% for line in k.docstring %} {{ line }} {% endfor %} {% if k.args %} {% if k.docstring %} {% endif %} {% for kwarg in k.args %} {% for line in kwarg.doc %} {{ line }} {% endfor %} {% endfor %} {% endif %} """ name = "{{ k.field }}" {% if k.coerced %} _coerce = True {% endif %} {% if k.name.endswith('Range') %} _core_field = {{ k.name[:-5] }}() {% endif %} {% if k.params %} _param_defs = { {% for param in k.params %} "{{ param.name }}": {{ param.param }}, {% endfor %} } {% endif %} def __init__( self, {% for arg in k.args %} {% if arg.positional %} {{ arg.name }}: {{ arg.type }} = DEFAULT, {% endif %} {% endfor %} *args: Any, {% for arg in k.args %} {% if not arg.positional %} {{ arg.name }}: {{ arg.type }} = DEFAULT, {% endif %} {% endfor %} **kwargs: Any ): {% for arg in k.args %} {% if not arg.positional %} if {{ arg.name }} is not DEFAULT: {% if "InstrumentedField" in arg.type %} {% if "Sequence" in arg.type %} if isinstance({{ arg.name }}, list): kwargs["{{ arg.name }}"] = [str(field) for field in {{ arg.name }}] else: kwargs["{{ arg.name }}"] = str({{ arg.name }}) {% else %} kwargs["{{ arg.name }}"] = str({{ arg.name }}) {% endif %} {% else %} kwargs["{{ arg.name }}"] = {{ arg.name }} {% endif %} {% endif %} {% endfor %} {% if k.field == 'object' %} if doc_class is not DEFAULT and (properties is not DEFAULT or dynamic is not DEFAULT): raise ValidationException( "doc_class and properties/dynamic should not be provided together" ) if doc_class is not DEFAULT: self._doc_class: Type["InnerDoc"] = doc_class else: # FIXME import from .document import InnerDoc # no InnerDoc subclass, creating one instead... 
self._doc_class = type("InnerDoc", (InnerDoc,), {}) for name, field in (properties if properties is not DEFAULT else {}).items(): self._doc_class._doc_type.mapping.field(name, field) if "properties" in kwargs: del kwargs["properties"] if dynamic is not DEFAULT: self._doc_class._doc_type.mapping.meta("dynamic", dynamic) self._mapping: "MappingBase" = deepcopy(self._doc_class._doc_type.mapping) super().__init__(**kwargs) def __getitem__(self, name: str) -> Field: return self._mapping[name] def __contains__(self, name: str) -> bool: return name in self._mapping def _empty(self) -> "InnerDoc": return self._wrap({}) def _wrap(self, data: Dict[str, Any]) -> "InnerDoc": return self._doc_class.from_es(data, data_only=True) def empty(self) -> Union["InnerDoc", AttrList[Any]]: if self._multi: return AttrList[Any]([], self._wrap) return self._empty() def to_dict(self) -> Dict[str, Any]: d = self._mapping.to_dict() d.update(super().to_dict()) return d def _collect_fields(self) -> Iterator[Field]: return self._mapping.properties._collect_fields() def _deserialize(self, data: Any) -> "InnerDoc": # don't wrap already wrapped data if isinstance(data, self._doc_class): return data if isinstance(data, AttrDict): data = data._d_ return self._wrap(data) def _serialize( self, data: Optional[Union[Dict[str, Any], "InnerDoc"]], skip_empty: bool ) -> Optional[Dict[str, Any]]: if data is None: return None # somebody assigned raw dict to the field, we should tolerate that if isinstance(data, collections.abc.Mapping): return data return data.to_dict(skip_empty=skip_empty) def clean(self, data: Any) -> Any: data = super().clean(data) if data is None: return None if isinstance(data, (list, AttrList)): for d in cast(Iterator["InnerDoc"], data): d.full_clean() else: data.full_clean() return data def update(self, other: Any, update_only: bool = False) -> None: if not isinstance(other, Object): # not an inner/nested object, no merge possible return self._mapping.update(other._mapping, update_only) {% elif k.field == "nested" %} kwargs.setdefault("multi", True) super().__init__(*args, **kwargs) {% elif k.field == "date" %} if default_timezone is DEFAULT: self._default_timezone = None elif isinstance(default_timezone, str): self._default_timezone = tz.gettz(default_timezone) else: self._default_timezone = default_timezone super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Union[datetime, date]: if isinstance(data, str): try: data = parser.parse(data) except Exception as e: raise ValidationException( f"Could not parse date from the value ({data!r})", e ) # we treat the yyyy-MM-dd format as a special case if hasattr(self, "format") and self.format == "yyyy-MM-dd": data = data.date() if isinstance(data, datetime): if self._default_timezone and data.tzinfo is None: data = data.replace(tzinfo=self._default_timezone) return cast(datetime, data) if isinstance(data, date): return data if isinstance(data, int): # Divide by a float to preserve milliseconds on the datetime. 
return datetime.utcfromtimestamp(data / 1000.0) raise ValidationException(f"Could not parse date from the value ({data!r})") {% elif k.field == "boolean" %} super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> bool: if data == "false": return False return bool(data) def clean(self, data: Any) -> Optional[bool]: if data is not None: data = self.deserialize(data) if data is None and self._required: raise ValidationException("Value required for this field.") return data # type: ignore[no-any-return] {% elif k.field == "float" %} super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> float: return float(data) {% elif k.field == "dense_vector" %} self._element_type = kwargs.get("element_type", "float") if self._element_type in ["float", "byte"]: kwargs["multi"] = True super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Any: if self._element_type == "float": return float(data) elif self._element_type == "byte": return int(data) return data {% elif k.field == "scaled_float" %} if 'scaling_factor' not in kwargs: if len(args) > 0: kwargs['scaling_factor'] = args[0] args = args[1:] else: raise TypeError("missing required argument: 'scaling_factor'") super().__init__(*args, **kwargs) {% elif k.field == "integer" %} super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> int: return int(data) {% elif k.field == "ip" %} super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> Union["IPv4Address", "IPv6Address"]: # the ipaddress library for pypy only accepts unicode. return ipaddress.ip_address(unicode(data)) def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]: if data is None: return None return str(data) {% elif k.field == "binary" %} super().__init__(*args, **kwargs) def clean(self, data: str) -> str: # Binary fields are opaque, so there's not much cleaning # that can be done. return data def _deserialize(self, data: Any) -> bytes: return base64.b64decode(data) def _serialize(self, data: Any, skip_empty: bool) -> Optional[str]: if data is None: return None return base64.b64encode(data).decode() {% elif k.field == "percolator" %} super().__init__(*args, **kwargs) def _deserialize(self, data: Any) -> "Query": return Q(data) # type: ignore[no-any-return] def _serialize(self, data: Any, skip_empty: bool) -> Optional[Dict[str, Any]]: if data is None: return None return data.to_dict() # type: ignore[no-any-return] {% else %} super().__init__(*args, **kwargs) {% endif %} {% endfor %} python-elasticsearch-9.1.1/utils/templates/query.py.tpl000066400000000000000000000270031506101734100233260ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
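{# Jinja2 template for the generated DSL query module. Like field.py.tpl it
   emits one Query subclass per entry in ``classes``; single-field query
   types (e.g. Term) get the ``_field``/``_value`` positional shortcut, and
   the hand-maintained operator overloads for MatchAll, MatchNone and Bool
   (combining queries with &, | and ~) are appended inside the loop body for
   those class names. #}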
import collections.abc from copy import deepcopy from itertools import chain from typing import ( TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Literal, Mapping, MutableMapping, Optional, Protocol, Sequence, TypeVar, Union, cast, overload, ) from elastic_transport.client_utils import DEFAULT # 'SF' looks unused but the test suite assumes it's available # from this module so others are liable to do so as well. from .function import SF # noqa: F401 from .function import ScoreFunction from .utils import DslBase if TYPE_CHECKING: from elastic_transport.client_utils import DefaultType from . import types, wrappers from .document_base import InstrumentedField _T = TypeVar("_T") _M = TypeVar("_M", bound=Mapping[str, Any]) class QProxiedProtocol(Protocol[_T]): _proxied: _T @overload def Q(name_or_query: MutableMapping[str, _M]) -> "Query": ... @overload def Q(name_or_query: "Query") -> "Query": ... @overload def Q(name_or_query: QProxiedProtocol[_T]) -> _T: ... @overload def Q(name_or_query: str = "match_all", **params: Any) -> "Query": ... def Q( name_or_query: Union[ str, "Query", QProxiedProtocol[_T], MutableMapping[str, _M], ] = "match_all", **params: Any, ) -> Union["Query", _T]: # {"match": {"title": "python"}} if isinstance(name_or_query, collections.abc.MutableMapping): if params: raise ValueError("Q() cannot accept parameters when passing in a dict.") if len(name_or_query) != 1: raise ValueError( 'Q() can only accept dict with a single query ({"match": {...}}). ' "Instead it got (%r)" % name_or_query ) name, q_params = deepcopy(name_or_query).popitem() return Query.get_dsl_class(name)(_expand__to_dot=False, **q_params) # MatchAll() if isinstance(name_or_query, Query): if params: raise ValueError( "Q() cannot accept parameters when passing in a Query object." 
class Query(DslBase):
    _type_name = "query"
    _type_shortcut = staticmethod(Q)
    name: ClassVar[Optional[str]] = None

    # Add type annotations for methods not defined in every subclass
    __ror__: ClassVar[Callable[["Query", "Query"], "Query"]]
    __radd__: ClassVar[Callable[["Query", "Query"], "Query"]]
    __rand__: ClassVar[Callable[["Query", "Query"], "Query"]]

    def __add__(self, other: "Query") -> "Query":
        # make sure we give queries that know how to combine themselves
        # preference
        if hasattr(other, "__radd__"):
            return other.__radd__(self)
        return Bool(must=[self, other])

    def __invert__(self) -> "Query":
        return Bool(must_not=[self])

    def __or__(self, other: "Query") -> "Query":
        # make sure we give queries that know how to combine themselves
        # preference
        if hasattr(other, "__ror__"):
            return other.__ror__(self)
        return Bool(should=[self, other])

    def __and__(self, other: "Query") -> "Query":
        # make sure we give queries that know how to combine themselves
        # preference
        if hasattr(other, "__rand__"):
            return other.__rand__(self)
        return Bool(must=[self, other])
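{# Illustrative only (the field names are hypothetical): the operator
   overloads above let queries compose into Bool queries naturally:

       Q("match", title="python") & Q("match", title="django")
       # -> Bool(must=[Match(title="python"), Match(title="django")])

       ~Q("term", published=False)
       # -> Bool(must_not=[Term(published=False)])
#}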
{% for k in classes %}

class {{ k.name }}({{ parent }}):
    """
    {% for line in k.docstring %}
    {{ line }}
    {% endfor %}
    {% if k.args %}
    {% if k.docstring %}

    {% endif %}
    {% for kwarg in k.args %}
    {% for line in kwarg.doc %}
    {{ line }}
    {% endfor %}
    {% endfor %}
    {% endif %}
    """

    name = "{{ k.property_name }}"
    {% if k.params %}
    _param_defs = {
        {% for param in k.params %}
        "{{ param.name }}": {{ param.param }},
        {% endfor %}
        {% if k.name == "FunctionScore" %}
        {# The FunctionScore class implements a custom solution for the
           `functions` shortcut property. Until the code generator can
           support shortcut properties directly that solution is added
           here #}
        "filter": {"type": "query"},
        {% endif %}
    }
    {% endif %}

    def __init__(
        self,
        {% for arg in k.args %}
        {% if arg.positional %}
        {{ arg.name }}: {{ arg.type }} = DEFAULT,
        {% endif %}
        {% endfor %}
        {% if k.args and not k.args[-1].positional %}
        *,
        {% endif %}
        {% for arg in k.args %}
        {% if not arg.positional %}
        {{ arg.name }}: {{ arg.type }} = DEFAULT,
        {% endif %}
        {% endfor %}
        **kwargs: Any
    ):
        {% if k.name == "FunctionScore" %}
        {# continuation of the FunctionScore shortcut property support from above #}
        if functions is DEFAULT:
            functions = []
            for name in ScoreFunction._classes:
                if name in kwargs:
                    functions.append({name: kwargs.pop(name)})  # type: ignore[arg-type]
        {% elif k.is_single_field %}
        if _field is not DEFAULT:
            kwargs[str(_field)] = _value
        {% elif k.is_multi_field %}
        if _fields is not DEFAULT:
            for field, value in _fields.items():
                kwargs[str(field)] = value
        {% endif %}
        super().__init__(
            {% for arg in k.args %}
            {% if not arg.positional %}
            {{ arg.name }}={{ arg.name }},
            {% endif %}
            {% endfor %}
            **kwargs
        )

    {# what follows is a set of Pythonic enhancements to some of the query
       classes which are outside the scope of the code generator #}
    {% if k.name == "MatchAll" %}
    def __add__(self, other: "Query") -> "Query":
        return other._clone()

    __and__ = __rand__ = __radd__ = __add__

    def __or__(self, other: "Query") -> "MatchAll":
        return self

    __ror__ = __or__

    def __invert__(self) -> "MatchNone":
        return MatchNone()


EMPTY_QUERY = MatchAll()

    {% elif k.name == "MatchNone" %}
    def __add__(self, other: "Query") -> "MatchNone":
        return self

    __and__ = __rand__ = __radd__ = __add__

    def __or__(self, other: "Query") -> "Query":
        return other._clone()

    __ror__ = __or__

    def __invert__(self) -> MatchAll:
        return MatchAll()

    {% elif k.name == "Bool" %}
    def __add__(self, other: Query) -> "Bool":
        q = self._clone()
        if isinstance(other, Bool):
            q.must += other.must
            q.should += other.should
            q.must_not += other.must_not
            q.filter += other.filter
        else:
            q.must.append(other)
        return q

    __radd__ = __add__

    def __or__(self, other: Query) -> Query:
        for q in (self, other):
            if isinstance(q, Bool) and not any(
                (q.must, q.must_not, q.filter, getattr(q, "minimum_should_match", None))
            ):
                other = self if q is other else other
                q = q._clone()
                if isinstance(other, Bool) and not any(
                    (
                        other.must,
                        other.must_not,
                        other.filter,
                        getattr(other, "minimum_should_match", None),
                    )
                ):
                    q.should.extend(other.should)
                else:
                    q.should.append(other)
                return q

        return Bool(should=[self, other])

    __ror__ = __or__

    @property
    def _min_should_match(self) -> int:
        return getattr(
            self,
            "minimum_should_match",
            0 if not self.should or (self.must or self.filter) else 1,
        )

    def __invert__(self) -> Query:
        # Because an empty Bool query is treated like
        # MatchAll the inverse should be MatchNone
        if not any(chain(self.must, self.filter, self.should, self.must_not)):
            return MatchNone()

        negations: List[Query] = []
        for q in chain(self.must, self.filter):
            negations.append(~q)

        for q in self.must_not:
            negations.append(q)

        if self.should and self._min_should_match:
            negations.append(Bool(must_not=self.should[:]))

        if len(negations) == 1:
            return negations[0]
        return Bool(should=negations)

    def __and__(self, other: Query) -> Query:
        q = self._clone()
        if isinstance(other, Bool):
            q.must += other.must
            q.must_not += other.must_not
            q.filter += other.filter
            q.should = []

            # reset minimum_should_match as it will get calculated below
            if "minimum_should_match" in q._params:
                del q._params["minimum_should_match"]

            for qx in (self, other):
                min_should_match = qx._min_should_match
                # TODO: percentages or negative numbers will fail here
                # for now we report an error
                if not isinstance(min_should_match, int) or min_should_match < 0:
                    raise ValueError(
                        "Can only combine queries with positive integer values for minimum_should_match"
                    )
                # all subqueries are required
                if len(qx.should) <= min_should_match:
                    q.must.extend(qx.should)
                # not all of them are required, use it and remember min_should_match
                elif not q.should:
                    q.minimum_should_match = min_should_match
                    q.should = qx.should
                # all queries are optional, just extend should
                elif q._min_should_match == 0 and min_should_match == 0:
                    q.should.extend(qx.should)
                # not all are required, add a should list to the must with proper min_should_match
                else:
                    q.must.append(
                        Bool(should=qx.should, minimum_should_match=min_should_match)
                    )
        else:
            if not (q.must or q.filter) and q.should:
                q._params.setdefault("minimum_should_match", 1)
            q.must.append(other)
        return q

    __rand__ = __and__
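    {# Worked example (queries A, B, C are hypothetical) of the __and__ logic
       above: Bool(should=[A, B]) & C keeps A and B optional but requires at
       least one of them, i.e. it yields
       Bool(must=[C], should=[A, B], minimum_should_match=1),
       because the original Bool had no must or filter clauses. #}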
    {% elif k.name == "Terms" %}
    def _setattr(self, name: str, value: Any) -> None:
        # here we convert any iterables that are not strings to lists
        if hasattr(value, "__iter__") and not isinstance(value, (str, list, dict)):
            value = list(value)
        super()._setattr(name, value)

    {% endif %}
{% endfor %}
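{# Illustrative note (the values are hypothetical): thanks to the
   Terms._setattr override above, any non-string iterable is normalized to a
   list, so Terms(tags=("python", "search")).to_dict() serializes to
   {"terms": {"tags": ["python", "search"]}}. #}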
python-elasticsearch-9.1.1/utils/templates/response.__init__.py.tpl
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from typing import (
    TYPE_CHECKING,
    Any,
    Dict,
    Generic,
    Iterator,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Union,
    cast,
)

from ..utils import _R, AttrDict, AttrList, _wrap
from .hit import Hit, HitMeta

if TYPE_CHECKING:
    from ..aggs import Agg
    from ..faceted_search_base import FacetedSearchBase
    from ..search_base import Request, SearchBase
    from ..update_by_query_base import UpdateByQueryBase
    from .. import types

__all__ = ["Response", "AggResponse", "UpdateByQueryResponse", "Hit", "HitMeta", "AggregateResponseType"]


class Response(AttrDict[Any], Generic[_R]):
    """An Elasticsearch search response.

    {% for arg in response.args %}
    {% for line in arg.doc %}
    {{ line }}
    {% endfor %}
    {% endfor %}
    """

    _search: "SearchBase[_R]"
    _faceted_search: "FacetedSearchBase[_R]"
    _doc_class: Optional[_R]
    _hits: List[_R]

    {% for arg in response.args %}
    {% if arg.name not in ["hits", "aggregations"] %}
    {{ arg.name }}: {{ arg.type }}
    {% endif %}
    {% endfor %}

    def __init__(
        self,
        search: "Request[_R]",
        response: Dict[str, Any],
        doc_class: Optional[_R] = None,
    ):
        super(AttrDict, self).__setattr__("_search", search)
        super(AttrDict, self).__setattr__("_doc_class", doc_class)
        super().__init__(response)

    def __iter__(self) -> Iterator[_R]:  # type: ignore[override]
        return iter(self.hits)

    def __getitem__(self, key: Union[slice, int, str]) -> Any:
        if isinstance(key, (slice, int)):
            # for slicing etc
            return self.hits[key]
        return super().__getitem__(key)

    def __nonzero__(self) -> bool:
        return bool(self.hits)

    __bool__ = __nonzero__

    def __repr__(self) -> str:
        return "<Response: %r>" % (self.hits or self.aggregations)

    def __len__(self) -> int:
        return len(self.hits)

    def __getstate__(self) -> Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]:  # type: ignore[override]
        return self._d_, self._search, self._doc_class

    def __setstate__(
        self, state: Tuple[Dict[str, Any], "Request[_R]", Optional[_R]]  # type: ignore[override]
    ) -> None:
        super(AttrDict, self).__setattr__("_d_", state[0])
        super(AttrDict, self).__setattr__("_search", state[1])
        super(AttrDict, self).__setattr__("_doc_class", state[2])

    def success(self) -> bool:
        return self._shards.total == self._shards.successful and not self.timed_out

    @property
    def hits(self) -> List[_R]:
        if not hasattr(self, "_hits"):
            h = cast(AttrDict[Any], self._d_["hits"])

            try:
                hits = AttrList(list(map(self._search._get_result, h["hits"])))
            except AttributeError as e:
                # avoid raising AttributeError since it will be hidden by the property
                raise TypeError("Could not parse hits.", e)

            # avoid assigning _hits into self._d_
            super(AttrDict, self).__setattr__("_hits", hits)
            for k in h:
                setattr(self._hits, k, _wrap(h[k]))
        return self._hits

    @property
    def aggregations(self) -> "AggResponse[_R]":
        return self.aggs

    @property
    def aggs(self) -> "AggResponse[_R]":
        if not hasattr(self, "_aggs"):
            aggs = AggResponse[_R](
                cast("Agg[_R]", self._search.aggs),
                self._search,
                cast(Dict[str, Any], self._d_.get("aggregations", {})),
            )

            # avoid assigning _aggs into self._d_
            super(AttrDict, self).__setattr__("_aggs", aggs)
        return cast("AggResponse[_R]", self._aggs)
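    {# A hedged usage sketch of the properties above (the index, field, and
       aggregation names are illustrative):

           r = Search(index="blog").query("match", title="python").execute()
           if r.success():
               total = r.hits.total
               first = r[0]               # indexing goes through .hits
               buckets = r.aggregations.per_tag.buckets
    #}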
""" if len(self.hits) == 0: raise ValueError("Cannot use search_after when there are no search results") if not hasattr(self.hits[-1].meta, "sort"): # type: ignore[attr-defined] raise ValueError("Cannot use search_after when results are not sorted") return self._search.extra(search_after=self.hits[-1].meta.sort) # type: ignore[attr-defined] AggregateResponseType = {{ response["aggregate_type"] }} class AggResponse(AttrDict[Any], Generic[_R]): """An Elasticsearch aggregation response.""" _meta: Dict[str, Any] def __init__(self, aggs: "Agg[_R]", search: "Request[_R]", data: Dict[str, Any]): super(AttrDict, self).__setattr__("_meta", {"search": search, "aggs": aggs}) super().__init__(data) def __getitem__(self, attr_name: str) -> AggregateResponseType: if attr_name in self._meta["aggs"]: # don't do self._meta['aggs'][attr_name] to avoid copying agg = self._meta["aggs"].aggs[attr_name] return cast(AggregateResponseType, agg.result(self._meta["search"], self._d_[attr_name])) return super().__getitem__(attr_name) # type: ignore[no-any-return] def __iter__(self) -> Iterator[AggregateResponseType]: # type: ignore[override] for name in self._meta["aggs"]: yield self[name] class UpdateByQueryResponse(AttrDict[Any], Generic[_R]): """An Elasticsearch update by query response. {% for arg in ubq_response.args %} {% for line in arg.doc %} {{ line }} {% endfor %} {% endfor %} """ _search: "UpdateByQueryBase[_R]" {% for arg in ubq_response.args %} {{ arg.name }}: {{ arg.type }} {% endfor %} def __init__( self, search: "Request[_R]", response: Dict[str, Any], doc_class: Optional[_R] = None, ): super(AttrDict, self).__setattr__("_search", search) super(AttrDict, self).__setattr__("_doc_class", doc_class) super().__init__(response) def success(self) -> bool: return not self.timed_out and not self.failures python-elasticsearch-9.1.1/utils/templates/types.py.tpl000066400000000000000000000065471506101734100233370ustar00rootroot00000000000000# Licensed to Elasticsearch B.V. under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch B.V. licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from typing import Any, Dict, Literal, Mapping, Sequence, Union from elastic_transport.client_utils import DEFAULT, DefaultType from . 
python-elasticsearch-9.1.1/utils/templates/types.py.tpl
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from typing import Any, Dict, Literal, Mapping, Sequence, Union

from elastic_transport.client_utils import DEFAULT, DefaultType

from . import Query
from .document_base import InstrumentedField
from .utils import AttrDict

PipeSeparatedFlags = str


{% for k in classes %}
class {{ k.name }}({{ k.parent if k.parent else "AttrDict[Any]" }}):
    {% if k.docstring or k.args %}
    """
    {% for line in k.docstring %}
    {{ line }}
    {% endfor %}
    {% if k.args %}
    {% if k.docstring %}

    {% endif %}
    {% endif %}
    {% for arg in k.args %}
    {% for line in arg.doc %}
    {{ line }}
    {% endfor %}
    {% endfor %}
    """

    {% for arg in k.args %}
    {% if arg.name not in ["keys", "items"] %}
    {{ arg.name }}: {{ arg.type }}
    {% else %}
    {{ arg.name }}: {{ arg.type }}  # type: ignore[assignment]
    {% endif %}
    {% endfor %}

    {% if not k.for_response %}
    def __init__(
        self,
        {% for arg in k.args %}
        {% if arg.positional %}
        {{ arg.name }}: {{ arg.type }} = DEFAULT,
        {% endif %}
        {% endfor %}
        {% if k.args and not k.args[-1].positional %}
        *,
        {% endif %}
        {% for arg in k.args %}
        {% if not arg.positional %}
        {{ arg.name }}: {{ arg.type }} = DEFAULT,
        {% endif %}
        {% endfor %}
        **kwargs: Any
    ):
        {% if k.is_single_field %}
        if _field is not DEFAULT:
            kwargs[str(_field)] = _value
        {% elif k.is_multi_field %}
        if _fields is not DEFAULT:
            for field, value in _fields.items():
                kwargs[str(field)] = value
        {% endif %}
        {% for arg in k.args %}
        {% if not arg.positional %}
        if {{ arg.name }} is not DEFAULT:
            {% if "InstrumentedField" in arg.type %}
            kwargs["{{ arg.name }}"] = str({{ arg.name }})
            {% else %}
            kwargs["{{ arg.name }}"] = {{ arg.name }}
            {% endif %}
        {% endif %}
        {% endfor %}
        {% if k.parent %}
        super().__init__(**kwargs)
        {% else %}
        super().__init__(kwargs)
        {% endif %}
    {% endif %}
    {% if k.buckets_as_dict %}

    @property
    def buckets_as_dict(self) -> Mapping[str, {{ k.buckets_as_dict }}]:
        return self.buckets  # type: ignore[return-value]
    {% endif %}
    {% else %}
    pass
    {% endif %}
{% endfor %}
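{# A hedged usage sketch of the classes this template generates (the class and
   parameter names below are believed to exist in elasticsearch.dsl.types but
   should be checked against the generated module):

       from elasticsearch.dsl import types

       hl = types.Highlight(
           fields={"title": types.HighlightField(fragment_size=50)}
       )

   Keyword arguments left as DEFAULT are simply omitted from the serialized
   body. #}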