pax_global_header00006660000000000000000000000064145612671100014515gustar00rootroot0000000000000052 comment=7f9e85017a0959e3ba07834880d92c748f8f67ab openai-python-1.12.0/000077500000000000000000000000001456126711000143705ustar00rootroot00000000000000openai-python-1.12.0/.devcontainer/000077500000000000000000000000001456126711000171275ustar00rootroot00000000000000openai-python-1.12.0/.devcontainer/Dockerfile000066400000000000000000000004631456126711000211240ustar00rootroot00000000000000ARG VARIANT="3.9" FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} USER vscode RUN curl -sSf https://rye-up.com/get | RYE_VERSION="0.15.2" RYE_INSTALL_OPTION="--yes" bash ENV PATH=/home/vscode/.rye/shims:$PATH RUN echo "[[ -d .venv ]] && source .venv/bin/activate" >> /home/vscode/.bashrc openai-python-1.12.0/.devcontainer/devcontainer.json000066400000000000000000000022271456126711000225060ustar00rootroot00000000000000// For format details, see https://aka.ms/devcontainer.json. For config options, see the // README at: https://github.com/devcontainers/templates/tree/main/src/debian { "name": "Debian", "build": { "dockerfile": "Dockerfile", "context": ".." }, "postStartCommand": "rye sync --all-features", "customizations": { "vscode": { "extensions": [ "ms-python.python" ], "settings": { "terminal.integrated.shell.linux": "/bin/bash", "python.pythonPath": ".venv/bin/python", "python.defaultInterpreterPath": ".venv/bin/python", "python.typeChecking": "basic", "terminal.integrated.env.linux": { "PATH": "/home/vscode/.rye/shims:${env:PATH}" } } } } // Features to add to the dev container. More info: https://containers.dev/features. // "features": {}, // Use 'forwardPorts' to make a list of ports inside the container available locally. // "forwardPorts": [], // Configure tool-specific properties. // "customizations": {}, // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. // "remoteUser": "root" } openai-python-1.12.0/.github/000077500000000000000000000000001456126711000157305ustar00rootroot00000000000000openai-python-1.12.0/.github/CODEOWNERS000066400000000000000000000000241456126711000173170ustar00rootroot00000000000000* @openai/sdks-team openai-python-1.12.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001456126711000201135ustar00rootroot00000000000000openai-python-1.12.0/.github/ISSUE_TEMPLATE/bug_report.yml000066400000000000000000000033711456126711000230120ustar00rootroot00000000000000name: Bug report description: Report an issue or bug with this library labels: ['bug'] body: - type: markdown attributes: value: | Thanks for taking the time to fill out this bug report! - type: checkboxes id: non_api attributes: label: Confirm this is an issue with the Python library and not an underlying OpenAI API description: Issues with the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) options: - label: This is an issue with the Python library required: true - type: textarea id: what-happened attributes: label: Describe the bug description: A clear and concise description of what the bug is, and any additional context. placeholder: Tell us what you see! validations: required: true - type: textarea id: repro-steps attributes: label: To Reproduce description: Steps to reproduce the behavior. placeholder: | 1. Fetch a '...' 2. Update the '....' 3. 
See error validations: required: true - type: textarea id: code-snippets attributes: label: Code snippets description: If applicable, add code snippets to help explain your problem. render: Python validations: required: false - type: input id: os attributes: label: OS placeholder: macOS validations: required: true - type: input id: language-version attributes: label: Python version placeholder: Python v3.11.4 validations: required: true - type: input id: lib-version attributes: label: Library version placeholder: openai v1.0.1 validations: required: true openai-python-1.12.0/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000005311456126711000221020ustar00rootroot00000000000000blank_issues_enabled: false contact_links: - name: OpenAI support url: https://help.openai.com/ about: | Please only file issues here that you believe represent actual bugs or feature requests for the OpenAI Python library. If you're having general trouble with the OpenAI API, please visit our help center to get support. openai-python-1.12.0/.github/ISSUE_TEMPLATE/feature_request.yml000066400000000000000000000020041456126711000240350ustar00rootroot00000000000000name: Feature request description: Suggest an idea for this library labels: ['feature-request'] body: - type: markdown attributes: value: | Thanks for taking the time to fill out this feature request! - type: checkboxes id: non_api attributes: label: Confirm this is a feature request for the Python library and not the underlying OpenAI API. description: Feature requests for the underlying OpenAI API should be reported on our [Developer Community](https://community.openai.com/c/api/7) options: - label: This is a feature request for the Python library required: true - type: textarea id: feature attributes: label: Describe the feature or improvement you're requesting description: A clear and concise description of what you want to happen. validations: required: true - type: textarea id: context attributes: label: Additional context description: Add any other context about the feature request here. 
openai-python-1.12.0/.github/pull_request_template.md000066400000000000000000000007601456126711000226740ustar00rootroot00000000000000 - [ ] I understand that this repository is auto-generated and my pull request may not be merged ## Changes being requested ## Additional context & links openai-python-1.12.0/.github/workflows/000077500000000000000000000000001456126711000177655ustar00rootroot00000000000000openai-python-1.12.0/.github/workflows/ci.yml000066400000000000000000000014261456126711000211060ustar00rootroot00000000000000name: CI on: push: branches: - main pull_request: branches: - main jobs: lint: name: lint runs-on: ubuntu-latest if: github.repository == 'openai/openai-python' steps: - uses: actions/checkout@v3 - name: Install Rye run: | curl -sSf https://rye-up.com/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.15.2 RYE_INSTALL_OPTION: "--yes" - name: Install dependencies run: | rye sync --all-features - name: Run ruff run: | rye run check:ruff - name: Run type checking run: | rye run typecheck - name: Ensure importable run: | rye run python -c 'import openai' openai-python-1.12.0/.github/workflows/create-releases.yml000066400000000000000000000020421456126711000235520ustar00rootroot00000000000000name: Create releases on: schedule: - cron: '0 5 * * *' # every day at 5am UTC push: branches: - main jobs: release: name: release if: github.ref == 'refs/heads/main' && github.repository == 'openai/openai-python' runs-on: ubuntu-latest environment: publish steps: - uses: actions/checkout@v3 - uses: stainless-api/trigger-release-please@v1 id: release with: repo: ${{ github.event.repository.full_name }} stainless-api-key: ${{ secrets.STAINLESS_API_KEY }} - name: Install Rye if: ${{ steps.release.outputs.releases_created }} run: | curl -sSf https://rye-up.com/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.15.2 RYE_INSTALL_OPTION: "--yes" - name: Publish to PyPI if: ${{ steps.release.outputs.releases_created }} run: | bash ./bin/publish-pypi env: PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} openai-python-1.12.0/.github/workflows/publish-pypi.yml000066400000000000000000000013541456126711000231400ustar00rootroot00000000000000# workflow for re-running publishing to PyPI in case it fails for some reason # you can run this workflow by navigating to https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml name: Publish PyPI on: workflow_dispatch: jobs: publish: name: publish runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - name: Install Rye run: | curl -sSf https://rye-up.com/get | bash echo "$HOME/.rye/shims" >> $GITHUB_PATH env: RYE_VERSION: 0.15.2 RYE_INSTALL_OPTION: "--yes" - name: Publish to PyPI run: | bash ./bin/publish-pypi env: PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || secrets.PYPI_TOKEN }} openai-python-1.12.0/.github/workflows/release-doctor.yml000066400000000000000000000012631456126711000234220ustar00rootroot00000000000000name: Release Doctor on: push: branches: - main workflow_dispatch: jobs: release_doctor: name: release doctor runs-on: ubuntu-latest environment: publish if: github.repository == 'openai/openai-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next') steps: - uses: actions/checkout@v3 - name: Check release environment run: | bash ./bin/check-release-environment env: STAINLESS_API_KEY: ${{ secrets.STAINLESS_API_KEY }} PYPI_TOKEN: ${{ secrets.OPENAI_PYPI_TOKEN || 
secrets.PYPI_TOKEN }} openai-python-1.12.0/.gitignore000066400000000000000000000001221456126711000163530ustar00rootroot00000000000000.vscode _dev __pycache__ .mypy_cache dist .venv .idea .env .envrc codegen.log openai-python-1.12.0/.python-version000066400000000000000000000000071456126711000173720ustar00rootroot000000000000003.9.18 openai-python-1.12.0/.release-please-manifest.json000066400000000000000000000000231456126711000220270ustar00rootroot00000000000000{ ".": "1.12.0" }openai-python-1.12.0/.stats.yml000066400000000000000000000000311456126711000163210ustar00rootroot00000000000000configured_endpoints: 51 openai-python-1.12.0/CHANGELOG.md000066400000000000000000000734411456126711000162120ustar00rootroot00000000000000# Changelog ## 1.12.0 (2024-02-08) Full Changelog: [v1.11.1...v1.12.0](https://github.com/openai/openai-python/compare/v1.11.1...v1.12.0) ### Features * **api:** add `timestamp_granularities`, add `gpt-3.5-turbo-0125` model ([#1125](https://github.com/openai/openai-python/issues/1125)) ([1ecf8f6](https://github.com/openai/openai-python/commit/1ecf8f6b12323ed09fb6a2815c85b9533ee52a50)) * **cli/images:** add support for `--model` arg ([#1132](https://github.com/openai/openai-python/issues/1132)) ([0d53866](https://github.com/openai/openai-python/commit/0d5386615cda7cd50d5db90de2119b84dba29519)) ### Bug Fixes * remove double brackets from timestamp_granularities param ([#1140](https://github.com/openai/openai-python/issues/1140)) ([3db0222](https://github.com/openai/openai-python/commit/3db022216a81fa86470b53ec1246669bc7b17897)) * **types:** loosen most List params types to Iterable ([#1129](https://github.com/openai/openai-python/issues/1129)) ([bdb31a3](https://github.com/openai/openai-python/commit/bdb31a3b1db6ede4e02b3c951c4fd23f70260038)) ### Chores * **internal:** add lint command ([#1128](https://github.com/openai/openai-python/issues/1128)) ([4c021c0](https://github.com/openai/openai-python/commit/4c021c0ab0151c2ec092d860c9b60e22e658cd03)) * **internal:** support serialising iterable types ([#1127](https://github.com/openai/openai-python/issues/1127)) ([98d4e59](https://github.com/openai/openai-python/commit/98d4e59afcf2d65d4e660d91eb9462240ef5cd63)) ### Documentation * add CONTRIBUTING.md ([#1138](https://github.com/openai/openai-python/issues/1138)) ([79c8f0e](https://github.com/openai/openai-python/commit/79c8f0e8bf5470e2e31e781e8d279331e89ddfbe)) ## 1.11.1 (2024-02-04) Full Changelog: [v1.11.0...v1.11.1](https://github.com/openai/openai-python/compare/v1.11.0...v1.11.1) ### Bug Fixes * prevent crash when platform.architecture() is not allowed ([#1120](https://github.com/openai/openai-python/issues/1120)) ([9490554](https://github.com/openai/openai-python/commit/949055488488e93597cbc6c2cdd81f14f203e53b)) ## 1.11.0 (2024-02-03) Full Changelog: [v1.10.0...v1.11.0](https://github.com/openai/openai-python/compare/v1.10.0...v1.11.0) ### Features * **client:** support parsing custom response types ([#1111](https://github.com/openai/openai-python/issues/1111)) ([da00fc3](https://github.com/openai/openai-python/commit/da00fc3f8e0ff13c6c3ca970e4bb86846304bd06)) ### Chores * **interal:** make link to api.md relative ([#1117](https://github.com/openai/openai-python/issues/1117)) ([4a10879](https://github.com/openai/openai-python/commit/4a108797e46293357601ce933e21b557a5dc6954)) * **internal:** cast type in mocked test ([#1112](https://github.com/openai/openai-python/issues/1112)) 
([99b21e1](https://github.com/openai/openai-python/commit/99b21e1fc681eb10e01d479cc043ad3c89272b1c)) * **internal:** enable ruff type checking misuse lint rule ([#1106](https://github.com/openai/openai-python/issues/1106)) ([fa63e60](https://github.com/openai/openai-python/commit/fa63e605c82ec78f4fc27469c434b421a08fb909)) * **internal:** support multipart data with overlapping keys ([#1104](https://github.com/openai/openai-python/issues/1104)) ([455bc9f](https://github.com/openai/openai-python/commit/455bc9f1fd018a32cd604eb4b400e05aa8d71822)) * **internal:** support pre-release versioning ([#1113](https://github.com/openai/openai-python/issues/1113)) ([dea5b08](https://github.com/openai/openai-python/commit/dea5b08c28d47b331fd44f6920cf9fe322b68e51)) ## 1.10.0 (2024-01-25) Full Changelog: [v1.9.0...v1.10.0](https://github.com/openai/openai-python/compare/v1.9.0...v1.10.0) ### Features * **api:** add text embeddings dimensions param ([#1103](https://github.com/openai/openai-python/issues/1103)) ([94abfa0](https://github.com/openai/openai-python/commit/94abfa0f988c199ea95a9c870c4ae9808823186d)) * **azure:** proactively add audio/speech to deployment endpoints ([#1099](https://github.com/openai/openai-python/issues/1099)) ([fdf8742](https://github.com/openai/openai-python/commit/fdf87429b45ceb47ae6fd068ab70cc07bcb8da44)) * **client:** enable follow redirects by default ([#1100](https://github.com/openai/openai-python/issues/1100)) ([d325b7c](https://github.com/openai/openai-python/commit/d325b7ca594c2abaada536249b5633b106943333)) ### Chores * **internal:** add internal helpers ([#1092](https://github.com/openai/openai-python/issues/1092)) ([629bde5](https://github.com/openai/openai-python/commit/629bde5800d84735e22d924db23109a141f48644)) ### Refactors * remove unnecessary builtin import ([#1094](https://github.com/openai/openai-python/issues/1094)) ([504b7d4](https://github.com/openai/openai-python/commit/504b7d4a0b4715bd49a1a076a8d4868e51fb3351)) ## 1.9.0 (2024-01-21) Full Changelog: [v1.8.0...v1.9.0](https://github.com/openai/openai-python/compare/v1.8.0...v1.9.0) ### Features * **api:** add usage to runs and run steps ([#1090](https://github.com/openai/openai-python/issues/1090)) ([6c116df](https://github.com/openai/openai-python/commit/6c116dfbb0065d15050450df70e0e98fc8c80349)) ### Chores * **internal:** fix typing util function ([#1083](https://github.com/openai/openai-python/issues/1083)) ([3e60db6](https://github.com/openai/openai-python/commit/3e60db69f5d9187c4eb38451967259f534a36a82)) * **internal:** remove redundant client test ([#1085](https://github.com/openai/openai-python/issues/1085)) ([947974f](https://github.com/openai/openai-python/commit/947974f5af726e252b7b12c863743e50f41b79d3)) * **internal:** share client instances between all tests ([#1088](https://github.com/openai/openai-python/issues/1088)) ([05cd753](https://github.com/openai/openai-python/commit/05cd7531d40774d05c52b14dee54d137ac1452a3)) * **internal:** speculative retry-after-ms support ([#1086](https://github.com/openai/openai-python/issues/1086)) ([36a7576](https://github.com/openai/openai-python/commit/36a7576a913be8509a3cf6f262543083b485136e)) * lazy load raw resource class properties ([#1087](https://github.com/openai/openai-python/issues/1087)) ([d307127](https://github.com/openai/openai-python/commit/d30712744be07461e86763705c03c3495eadfc35)) ## 1.8.0 (2024-01-16) Full Changelog: [v1.7.2...v1.8.0](https://github.com/openai/openai-python/compare/v1.7.2...v1.8.0) ### Features * **client:** add support for 
streaming raw responses ([#1072](https://github.com/openai/openai-python/issues/1072)) ([0e93c3b](https://github.com/openai/openai-python/commit/0e93c3b5bc9cfa041e91962fd82c0d9358125024)) ### Bug Fixes * **client:** ensure path params are non-empty ([#1075](https://github.com/openai/openai-python/issues/1075)) ([9a25149](https://github.com/openai/openai-python/commit/9a2514997c2ddccbec9df8be3773e83271f1dab8)) * **proxy:** prevent recursion errors when debugging pycharm ([#1076](https://github.com/openai/openai-python/issues/1076)) ([3d78798](https://github.com/openai/openai-python/commit/3d787987cf7625b5b502cb0b63a37d55956eaf1d)) ### Chores * add write_to_file binary helper method ([#1077](https://github.com/openai/openai-python/issues/1077)) ([c622c6a](https://github.com/openai/openai-python/commit/c622c6aaf2ae7dc62bd6cdfc053204c5dc3293ac)) ## 1.7.2 (2024-01-12) Full Changelog: [v1.7.1...v1.7.2](https://github.com/openai/openai-python/compare/v1.7.1...v1.7.2) ### Documentation * **readme:** improve api reference ([#1065](https://github.com/openai/openai-python/issues/1065)) ([745b9e0](https://github.com/openai/openai-python/commit/745b9e08ae0abb8bf4cd87ed40fa450d9ad81ede)) ### Refactors * **api:** remove deprecated endpoints ([#1067](https://github.com/openai/openai-python/issues/1067)) ([199ddcd](https://github.com/openai/openai-python/commit/199ddcdca00c136e4e0c3ff16521eff22acf2a1a)) ## 1.7.1 (2024-01-10) Full Changelog: [v1.7.0...v1.7.1](https://github.com/openai/openai-python/compare/v1.7.0...v1.7.1) ### Chores * **client:** improve debug logging for failed requests ([#1060](https://github.com/openai/openai-python/issues/1060)) ([cf9a651](https://github.com/openai/openai-python/commit/cf9a6517b4aa0f24bcbe143c54ea908d43dfda92)) ## 1.7.0 (2024-01-08) Full Changelog: [v1.6.1...v1.7.0](https://github.com/openai/openai-python/compare/v1.6.1...v1.7.0) ### Features * add `None` default value to nullable response properties ([#1043](https://github.com/openai/openai-python/issues/1043)) ([d94b4d3](https://github.com/openai/openai-python/commit/d94b4d3d0adcd1a49a1c25cc9730cef013a3e9c9)) ### Bug Fixes * **client:** correctly use custom http client auth ([#1028](https://github.com/openai/openai-python/issues/1028)) ([3d7d93e](https://github.com/openai/openai-python/commit/3d7d93e951eb7fe09cd9d94d10a62a020398c7f9)) ### Chores * add .keep files for examples and custom code directories ([#1057](https://github.com/openai/openai-python/issues/1057)) ([7524097](https://github.com/openai/openai-python/commit/7524097a47af0fdc8b560186ef3b111b59430741)) * **internal:** bump license ([#1037](https://github.com/openai/openai-python/issues/1037)) ([d828527](https://github.com/openai/openai-python/commit/d828527540ebd97679075f48744818f06311b0cb)) * **internal:** loosen type var restrictions ([#1049](https://github.com/openai/openai-python/issues/1049)) ([e00876b](https://github.com/openai/openai-python/commit/e00876b20b93038450eb317899d8775c7661b8eb)) * **internal:** replace isort with ruff ([#1042](https://github.com/openai/openai-python/issues/1042)) ([f1fbc9c](https://github.com/openai/openai-python/commit/f1fbc9c0d62e7d89ab32c8bdfa39cd94b560690b)) * **internal:** update formatting ([#1041](https://github.com/openai/openai-python/issues/1041)) ([2e9ecee](https://github.com/openai/openai-python/commit/2e9ecee9bdfa8ec33b1b1527d5187483b700fad3)) * **src:** fix typos ([#988](https://github.com/openai/openai-python/issues/988)) 
([6a8b806](https://github.com/openai/openai-python/commit/6a8b80624636f9a0e5ada151b2509710a6f74808)) * use property declarations for resource members ([#1047](https://github.com/openai/openai-python/issues/1047)) ([131f6bc](https://github.com/openai/openai-python/commit/131f6bc6b0ccf79119096057079e10906b3d4678)) ### Documentation * fix docstring typos ([#1022](https://github.com/openai/openai-python/issues/1022)) ([ad3fd2c](https://github.com/openai/openai-python/commit/ad3fd2cd19bf91f94473e368554dff39a8f9ad16)) * improve audio example to show how to stream to a file ([#1017](https://github.com/openai/openai-python/issues/1017)) ([d45ed7f](https://github.com/openai/openai-python/commit/d45ed7f0513b167555ae875f1877fa205c5790d2)) ## 1.6.1 (2023-12-22) Full Changelog: [v1.6.0...v1.6.1](https://github.com/openai/openai-python/compare/v1.6.0...v1.6.1) ### Chores * **internal:** add bin script ([#1001](https://github.com/openai/openai-python/issues/1001)) ([99ffbda](https://github.com/openai/openai-python/commit/99ffbda279bf4c159511fb96b1d5bb688af25437)) * **internal:** use ruff instead of black for formatting ([#1008](https://github.com/openai/openai-python/issues/1008)) ([ceaf9a0](https://github.com/openai/openai-python/commit/ceaf9a06fbd1a846756bb72cce50a69c8cc20bd3)) ## 1.6.0 (2023-12-19) Full Changelog: [v1.5.0...v1.6.0](https://github.com/openai/openai-python/compare/v1.5.0...v1.6.0) ### Features * **api:** add additional instructions for runs ([#995](https://github.com/openai/openai-python/issues/995)) ([7bf9b75](https://github.com/openai/openai-python/commit/7bf9b75067905449e83e828c12eb384022cff6ca)) ### Chores * **cli:** fix typo in completions ([#985](https://github.com/openai/openai-python/issues/985)) ([d1e9e8f](https://github.com/openai/openai-python/commit/d1e9e8f24df366bb7b796c55a98247c025d229f5)) * **cli:** fix typo in completions ([#986](https://github.com/openai/openai-python/issues/986)) ([626bc34](https://github.com/openai/openai-python/commit/626bc34d82a7057bac99f8b556f9e5f60c261ee7)) * **internal:** fix binary response tests ([#983](https://github.com/openai/openai-python/issues/983)) ([cfb7e30](https://github.com/openai/openai-python/commit/cfb7e308393f2e912e959dd10d68096dd5b3ab9c)) * **internal:** fix typos ([#993](https://github.com/openai/openai-python/issues/993)) ([3b338a4](https://github.com/openai/openai-python/commit/3b338a401b206618774291ff8137deb0cc5f6b4c)) * **internal:** minor utils restructuring ([#992](https://github.com/openai/openai-python/issues/992)) ([5ba576a](https://github.com/openai/openai-python/commit/5ba576ae38d2c4c4d32a21933e0d68e0bc2f0d49)) * **package:** bump minimum typing-extensions to 4.7 ([#994](https://github.com/openai/openai-python/issues/994)) ([0c2da84](https://github.com/openai/openai-python/commit/0c2da84badf416f8b2213983f68bd2b6f9e52f2b)) * **streaming:** update constructor to use direct client names ([#991](https://github.com/openai/openai-python/issues/991)) ([6c3427d](https://github.com/openai/openai-python/commit/6c3427dac8c414658516aeb4caf5d5fd8b11097b)) ### Documentation * upgrade models in examples to latest version ([#989](https://github.com/openai/openai-python/issues/989)) ([cedd574](https://github.com/openai/openai-python/commit/cedd574e5611f3e71e92b523a72ba87bcfe546f1)) ## 1.5.0 (2023-12-17) Full Changelog: [v1.4.0...v1.5.0](https://github.com/openai/openai-python/compare/v1.4.0...v1.5.0) ### Features * **api:** add token logprobs to chat completions ([#980](https://github.com/openai/openai-python/issues/980)) 
([f50e962](https://github.com/openai/openai-python/commit/f50e962b930bd682a4299143b2995337e8571273)) ### Chores * **ci:** run release workflow once per day ([#978](https://github.com/openai/openai-python/issues/978)) ([215476a](https://github.com/openai/openai-python/commit/215476a0b99e0c92ab3e44ddd25de207af32d160)) ## 1.4.0 (2023-12-15) Full Changelog: [v1.3.9...v1.4.0](https://github.com/openai/openai-python/compare/v1.3.9...v1.4.0) ### Features * **api:** add optional `name` argument + improve docs ([#972](https://github.com/openai/openai-python/issues/972)) ([7972010](https://github.com/openai/openai-python/commit/7972010615820099f662c02821cfbd59e7d6ea44)) ## 1.3.9 (2023-12-12) Full Changelog: [v1.3.8...v1.3.9](https://github.com/openai/openai-python/compare/v1.3.8...v1.3.9) ### Documentation * improve README timeout comment ([#964](https://github.com/openai/openai-python/issues/964)) ([3c3ed5e](https://github.com/openai/openai-python/commit/3c3ed5edd938a9333e8d2fa47cb4b44178eef89a)) * small Improvement in the async chat response code ([#959](https://github.com/openai/openai-python/issues/959)) ([fb9d0a3](https://github.com/openai/openai-python/commit/fb9d0a358fa232043d9d5c149b6a888d50127c7b)) * small streaming readme improvements ([#962](https://github.com/openai/openai-python/issues/962)) ([f3be2e5](https://github.com/openai/openai-python/commit/f3be2e5cc24988471e6cedb3e34bdfd3123edc63)) ### Refactors * **client:** simplify cleanup ([#966](https://github.com/openai/openai-python/issues/966)) ([5c138f4](https://github.com/openai/openai-python/commit/5c138f4a7947e5b4aae8779fae78ca51269b355a)) * simplify internal error handling ([#968](https://github.com/openai/openai-python/issues/968)) ([d187f6b](https://github.com/openai/openai-python/commit/d187f6b6e4e646cca39c6ca35c618aa5c1bfbd61)) ## 1.3.8 (2023-12-08) Full Changelog: [v1.3.7...v1.3.8](https://github.com/openai/openai-python/compare/v1.3.7...v1.3.8) ### Bug Fixes * avoid leaking memory when Client.with_options is used ([#956](https://github.com/openai/openai-python/issues/956)) ([e37ecca](https://github.com/openai/openai-python/commit/e37ecca04040ce946822a7e40f5604532a59ee85)) * **errors:** properly assign APIError.body ([#949](https://github.com/openai/openai-python/issues/949)) ([c70e194](https://github.com/openai/openai-python/commit/c70e194f0a253409ec851607ae5219e3b5a8c442)) * **pagination:** use correct type hint for .object ([#943](https://github.com/openai/openai-python/issues/943)) ([23fe7ee](https://github.com/openai/openai-python/commit/23fe7ee48a71539b0d1e95ceff349264aae4090e)) ### Chores * **internal:** enable more lint rules ([#945](https://github.com/openai/openai-python/issues/945)) ([2c8add6](https://github.com/openai/openai-python/commit/2c8add64a261dea731bd162bb0cca222518d5440)) * **internal:** reformat imports ([#939](https://github.com/openai/openai-python/issues/939)) ([ec65124](https://github.com/openai/openai-python/commit/ec651249de2f4e4cf959f816e1b52f03d3b1017a)) * **internal:** reformat imports ([#944](https://github.com/openai/openai-python/issues/944)) ([5290639](https://github.com/openai/openai-python/commit/52906391c9b6633656ec7934e6bbac553ec667cd)) * **internal:** update formatting ([#941](https://github.com/openai/openai-python/issues/941)) ([8e5a156](https://github.com/openai/openai-python/commit/8e5a156d555fe68731ba0604a7455cc03cb451ce)) * **package:** lift anyio v4 restriction ([#927](https://github.com/openai/openai-python/issues/927)) 
([be0438a](https://github.com/openai/openai-python/commit/be0438a2e399bb0e0a94907229d02fc61ab479c0)) ### Documentation * fix typo in example ([#950](https://github.com/openai/openai-python/issues/950)) ([54f0ce0](https://github.com/openai/openai-python/commit/54f0ce0000abe32e97ae400f2975c028b8a84273)) ## 1.3.7 (2023-12-01) Full Changelog: [v1.3.6...v1.3.7](https://github.com/openai/openai-python/compare/v1.3.6...v1.3.7) ### Bug Fixes * **client:** correct base_url setter implementation ([#919](https://github.com/openai/openai-python/issues/919)) ([135d9cf](https://github.com/openai/openai-python/commit/135d9cf2820f1524764bf536a9322830bdcd5875)) * **client:** don't cause crashes when inspecting the module ([#897](https://github.com/openai/openai-python/issues/897)) ([db029a5](https://github.com/openai/openai-python/commit/db029a596c90b1af4ef0bfb1cdf31f54b2f5755d)) * **client:** ensure retried requests are closed ([#902](https://github.com/openai/openai-python/issues/902)) ([e025e6b](https://github.com/openai/openai-python/commit/e025e6bee44ea145d948869ef0c79bac0c376b9f)) ### Chores * **internal:** add tests for proxy change ([#899](https://github.com/openai/openai-python/issues/899)) ([71a13d0](https://github.com/openai/openai-python/commit/71a13d0c70d105b2b97720c72a1003b942cda2ae)) * **internal:** remove unused type var ([#915](https://github.com/openai/openai-python/issues/915)) ([4233bcd](https://github.com/openai/openai-python/commit/4233bcdae5f467f10454fcc008a6e728fa846830)) * **internal:** replace string concatenation with f-strings ([#908](https://github.com/openai/openai-python/issues/908)) ([663a8f6](https://github.com/openai/openai-python/commit/663a8f6dead5aa523d1e8779e75af1dabb1690c4)) * **internal:** replace string concatenation with f-strings ([#909](https://github.com/openai/openai-python/issues/909)) ([caab767](https://github.com/openai/openai-python/commit/caab767156375114078cf8d85031863361326b5f)) ### Documentation * fix typo in readme ([#904](https://github.com/openai/openai-python/issues/904)) ([472cd44](https://github.com/openai/openai-python/commit/472cd44e45a45b0b4f12583a5402e8aeb121d7a2)) * **readme:** update example snippets ([#907](https://github.com/openai/openai-python/issues/907)) ([bbb648e](https://github.com/openai/openai-python/commit/bbb648ef81eb11f81b457e2cbf33a832f4d29a76)) ## 1.3.6 (2023-11-28) Full Changelog: [v1.3.5...v1.3.6](https://github.com/openai/openai-python/compare/v1.3.5...v1.3.6) ### Bug Fixes * **client:** add support for streaming binary responses ([#866](https://github.com/openai/openai-python/issues/866)) ([2470d25](https://github.com/openai/openai-python/commit/2470d251b751e92e8950bc9e3026965e9925ac1c)) ### Chores * **deps:** bump mypy to v1.7.1 ([#891](https://github.com/openai/openai-python/issues/891)) ([11fcb2a](https://github.com/openai/openai-python/commit/11fcb2a3cd4205b307c13c65ad47d9e315b0084d)) * **internal:** send more detailed x-stainless headers ([#877](https://github.com/openai/openai-python/issues/877)) ([69e0549](https://github.com/openai/openai-python/commit/69e054947d587ff2548b101ece690d21d3c38f74)) * revert binary streaming change ([#875](https://github.com/openai/openai-python/issues/875)) ([0a06d6a](https://github.com/openai/openai-python/commit/0a06d6a078c5ee898dae75bab4988e1a1936bfbf)) ### Documentation * **readme:** minor updates ([#894](https://github.com/openai/openai-python/issues/894)) ([5458457](https://github.com/openai/openai-python/commit/54584572df4c2a086172d812c6acb84e3405328b)) * **readme:** update 
examples ([#893](https://github.com/openai/openai-python/issues/893)) ([124da87](https://github.com/openai/openai-python/commit/124da8720c44d40c083d29179f46a265761c1f4f)) * update readme code snippet ([#890](https://github.com/openai/openai-python/issues/890)) ([c522f21](https://github.com/openai/openai-python/commit/c522f21e2a685454185d57e462e74a28499460f9)) ## 1.3.5 (2023-11-21) Full Changelog: [v1.3.4...v1.3.5](https://github.com/openai/openai-python/compare/v1.3.4...v1.3.5) ### Bug Fixes * **azure:** ensure custom options can be passed to copy ([#858](https://github.com/openai/openai-python/issues/858)) ([05ca0d6](https://github.com/openai/openai-python/commit/05ca0d68e84d40f975614d27cb52c0f382104377)) ### Chores * **package:** add license classifier ([#826](https://github.com/openai/openai-python/issues/826)) ([bec004d](https://github.com/openai/openai-python/commit/bec004d030b277e05bdd51f66fae1e881291c30b)) * **package:** add license classifier metadata ([#860](https://github.com/openai/openai-python/issues/860)) ([80dffb1](https://github.com/openai/openai-python/commit/80dffb17ff0a10b0b9ea704c4247521e48b68408)) ## 1.3.4 (2023-11-21) Full Changelog: [v1.3.3...v1.3.4](https://github.com/openai/openai-python/compare/v1.3.3...v1.3.4) ### Bug Fixes * **client:** attempt to parse unknown json content types ([#854](https://github.com/openai/openai-python/issues/854)) ([ba50466](https://github.com/openai/openai-python/commit/ba5046611029a67714d5120b9cc6a3c7fecce10c)) ### Chores * **examples:** fix static types in assistants example ([#852](https://github.com/openai/openai-python/issues/852)) ([5b47b2c](https://github.com/openai/openai-python/commit/5b47b2c542b9b4fb143af121022e2d5ad0890ef4)) ## 1.3.3 (2023-11-17) Full Changelog: [v1.3.2...v1.3.3](https://github.com/openai/openai-python/compare/v1.3.2...v1.3.3) ### Chores * **internal:** update type hint for helper function ([#846](https://github.com/openai/openai-python/issues/846)) ([9a5966c](https://github.com/openai/openai-python/commit/9a5966c70fce620a183de580938556730564a405)) ## 1.3.2 (2023-11-16) Full Changelog: [v1.3.1...v1.3.2](https://github.com/openai/openai-python/compare/v1.3.1...v1.3.2) ### Documentation * **readme:** minor updates ([#841](https://github.com/openai/openai-python/issues/841)) ([7273ad1](https://github.com/openai/openai-python/commit/7273ad1510043d3e264969c72403a1a237401910)) ## 1.3.1 (2023-11-16) Full Changelog: [v1.3.0...v1.3.1](https://github.com/openai/openai-python/compare/v1.3.0...v1.3.1) ### Chores * **internal:** add publish script ([#838](https://github.com/openai/openai-python/issues/838)) ([3ea41bc](https://github.com/openai/openai-python/commit/3ea41bcede374c4e5c92d85108281637c3382e12)) ## 1.3.0 (2023-11-15) Full Changelog: [v1.2.4...v1.3.0](https://github.com/openai/openai-python/compare/v1.2.4...v1.3.0) ### Features * **api:** add gpt-3.5-turbo-1106 ([#813](https://github.com/openai/openai-python/issues/813)) ([9bb3c4e](https://github.com/openai/openai-python/commit/9bb3c4ed88c890db2605a793aa39fffa1d84e8ef)) * **client:** support reading the base url from an env variable ([#829](https://github.com/openai/openai-python/issues/829)) ([ca5fdc6](https://github.com/openai/openai-python/commit/ca5fdc6ca006a3550cc5eeea70dd3d96b9ba305a)) ### Bug Fixes * **breaking!:** correct broken type names in moderation categories ([#811](https://github.com/openai/openai-python/issues/811)) ([0bc211f](https://github.com/openai/openai-python/commit/0bc211fd46f4fcc1f7687bdfdce26894b679cb4f)) ### Chores * fix typo in docs 
and add request header for function calls ([#807](https://github.com/openai/openai-python/issues/807)) ([cbef703](https://github.com/openai/openai-python/commit/cbef7030c7b21a0c766fe83c62657cea1cd8d31c)) * **internal:** fix devcontainer interpeter path ([#810](https://github.com/openai/openai-python/issues/810)) ([0acc07d](https://github.com/openai/openai-python/commit/0acc07dd8281ba881f91689b8a5e4254e8743fbc)) ### Documentation * add azure env vars ([#814](https://github.com/openai/openai-python/issues/814)) ([bd8e32a](https://github.com/openai/openai-python/commit/bd8e32a380218d0c9ff43643ccc1a25b3c35120d)) * fix code comment typo ([#790](https://github.com/openai/openai-python/issues/790)) ([8407a27](https://github.com/openai/openai-python/commit/8407a27e848ae611eb087c8d10632447d7c55498)) * **readme:** fix broken azure_ad notebook link ([#781](https://github.com/openai/openai-python/issues/781)) ([3b92cdf](https://github.com/openai/openai-python/commit/3b92cdfa5490b50a72811bec2f6e54e070847961)) ## 1.2.4 (2023-11-13) Full Changelog: [v1.2.3...v1.2.4](https://github.com/openai/openai-python/compare/v1.2.3...v1.2.4) ### Bug Fixes * **client:** retry if SSLWantReadError occurs in the async client ([#804](https://github.com/openai/openai-python/issues/804)) ([be82288](https://github.com/openai/openai-python/commit/be82288f3c88c10c9ac20ba3b8cb53b5c7a4e2f9)) ## 1.2.3 (2023-11-10) Full Changelog: [v1.2.2...v1.2.3](https://github.com/openai/openai-python/compare/v1.2.2...v1.2.3) ### Bug Fixes * **cli/audio:** file format detection failing for whisper ([#733](https://github.com/openai/openai-python/issues/733)) ([01079d6](https://github.com/openai/openai-python/commit/01079d6dca13e0ec158dff81e0706d8a9d6c02ef)) * **client:** correctly flush the stream response body ([#771](https://github.com/openai/openai-python/issues/771)) ([0d52731](https://github.com/openai/openai-python/commit/0d5273165c96286f8456ae04b9eb0de5144e52f8)) * **client:** serialise pydantic v1 default fields correctly in params ([#776](https://github.com/openai/openai-python/issues/776)) ([d4c49ad](https://github.com/openai/openai-python/commit/d4c49ad2be9c0d926eece5fd33f6836279ea21e2)) * **models:** mark unknown fields as set in pydantic v1 ([#772](https://github.com/openai/openai-python/issues/772)) ([ae032a1](https://github.com/openai/openai-python/commit/ae032a1ba4efa72284a572bfaf0305af50142835)) * prevent IndexError in fine-tunes CLI ([#768](https://github.com/openai/openai-python/issues/768)) ([42f1633](https://github.com/openai/openai-python/commit/42f16332cf0f96f243f9797d6406283865254355)) ### Documentation * reword package description ([#764](https://github.com/openai/openai-python/issues/764)) ([9ff10df](https://github.com/openai/openai-python/commit/9ff10df30ca2d44978eb5f982ccf039c9f1bf1bf)) ## 1.2.2 (2023-11-09) Full Changelog: [v1.2.1...v1.2.2](https://github.com/openai/openai-python/compare/v1.2.1...v1.2.2) ### Bug Fixes * **client:** correctly assign error properties ([#759](https://github.com/openai/openai-python/issues/759)) ([ef264d2](https://github.com/openai/openai-python/commit/ef264d2293b77784f69039291ca2a17a454851cb)) ### Documentation * **readme:** link to migration guide ([#761](https://github.com/openai/openai-python/issues/761)) ([ddde839](https://github.com/openai/openai-python/commit/ddde8392be19e7ad77280374806667ecaef612da)) ## 1.2.1 (2023-11-09) Full Changelog: [v1.2.0...v1.2.1](https://github.com/openai/openai-python/compare/v1.2.0...v1.2.1) ### Documentation * **readme:** fix nested params example 
([#756](https://github.com/openai/openai-python/issues/756)) ([ffbe5ec](https://github.com/openai/openai-python/commit/ffbe5eca0f8790ebcdb27ffe845da178a3ef4c45)) ### Refactors * **client:** deprecate files.retrieve_content in favour of files.content ([#753](https://github.com/openai/openai-python/issues/753)) ([eea5bc1](https://github.com/openai/openai-python/commit/eea5bc173466f63a6e84bd2d741b4873ca056b4c)) ## 1.2.0 (2023-11-08) Full Changelog: [v1.1.2...v1.2.0](https://github.com/openai/openai-python/compare/v1.1.2...v1.2.0) ### Features * **api:** unify function types ([#741](https://github.com/openai/openai-python/issues/741)) ([ed16c4d](https://github.com/openai/openai-python/commit/ed16c4d2fec6cf4e33235d82b05ed9a777752204)) * **client:** support passing chunk size for binary responses ([#747](https://github.com/openai/openai-python/issues/747)) ([c0c89b7](https://github.com/openai/openai-python/commit/c0c89b77a69ef098900e3a194894efcf72085d36)) ### Bug Fixes * **api:** update embedding response object type ([#739](https://github.com/openai/openai-python/issues/739)) ([29182c4](https://github.com/openai/openai-python/commit/29182c4818e2c56f46e961dba33e31dc30c25519)) * **client:** show a helpful error message if the v0 API is used ([#743](https://github.com/openai/openai-python/issues/743)) ([920567c](https://github.com/openai/openai-python/commit/920567cb04df48a7f6cd2a3402a0b1f172c6290e)) ### Chores * **internal:** improve github devcontainer setup ([#737](https://github.com/openai/openai-python/issues/737)) ([0ac1abb](https://github.com/openai/openai-python/commit/0ac1abb07ec687a4f7b1150be10054dbd6e7cfbc)) ### Refactors * **api:** rename FunctionObject to FunctionDefinition ([#746](https://github.com/openai/openai-python/issues/746)) ([1afd138](https://github.com/openai/openai-python/commit/1afd13856c0e586ecbde8b24fe4f4bad9beeefdf)) ## 1.1.2 (2023-11-08) Full Changelog: [v1.1.1...v1.1.2](https://github.com/openai/openai-python/compare/v1.1.1...v1.1.2) ### Bug Fixes * **api:** accidentally required params, add new models & other fixes ([#729](https://github.com/openai/openai-python/issues/729)) ([03c3e03](https://github.com/openai/openai-python/commit/03c3e03fc758cf4e59b81edf73a2618d80b560b7)) * asssitant_deleted -> assistant_deleted ([#711](https://github.com/openai/openai-python/issues/711)) ([287b51e](https://github.com/openai/openai-python/commit/287b51e4f7cede9667c118007de1275eb04772c6)) ### Chores * **docs:** fix github links ([#719](https://github.com/openai/openai-python/issues/719)) ([0cda8ca](https://github.com/openai/openai-python/commit/0cda8cab718d53d7dc0604d9fac52838c9391565)) * **internal:** fix some typos ([#718](https://github.com/openai/openai-python/issues/718)) ([894ad87](https://github.com/openai/openai-python/commit/894ad874aaa5d74530f561896ff31f68693418da)) openai-python-1.12.0/CONTRIBUTING.md000066400000000000000000000066321456126711000166300ustar00rootroot00000000000000## Setting up the environment ### With Rye We use [Rye](https://rye-up.com/) to manage dependencies so we highly recommend [installing it](https://rye-up.com/guide/installation/) as it will automatically provision a Python environment with the expected Python version. 
After installing Rye, you'll just have to run this command: ```sh $ rye sync --all-features ``` You can then run scripts using `rye run python script.py` or by activating the virtual environment: ```sh $ rye shell # or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work $ source .venv/bin/activate # now you can omit the `rye run` prefix $ python script.py ``` ### Without Rye Alternatively if you don't want to install `Rye`, you can stick with the standard `pip` setup by ensuring you have the Python version specified in `.python-version`, create a virtual environment however you desire and then install dependencies using this command: ```sh $ pip install -r requirements-dev.lock ``` ## Modifying/Adding code Most of the SDK is generated code, and any modified code will be overridden on the next generation. The `src/openai/lib/` and `examples/` directories are exceptions and will never be overridden. ## Adding and running examples All files in the `examples/` directory are not modified by the Stainless generator and can be freely edited or added to. ```bash # add an example to examples/.py #!/usr/bin/env -S rye run python … ``` ``` chmod +x examples/.py # run the example against your api ./examples/.py ``` ## Using the repository from source If you’d like to use the repository from source, you can either install from git or link to a cloned repository: To install via git: ```bash pip install git+ssh://git@github.com:openai/openai-python.git ``` Alternatively, you can build from source and install the wheel file: Building this package will create two files in the `dist/` directory, a `.tar.gz` containing the source files and a `.whl` that can be used to install the package efficiently. To create a distributable version of the library, all you have to do is run this command: ```bash rye build # or python -m build ``` Then to install: ```sh pip install ./path-to-wheel-file.whl ``` ## Running tests Most tests will require you to [setup a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. ```bash # you will need npm installed npx prism path/to/your/openapi.yml ``` ```bash rye run pytest ``` ## Linting and formatting This repository uses [ruff](https://github.com/astral-sh/ruff) and [black](https://github.com/psf/black) to format the code in the repository. To lint: ```bash rye run lint ``` To format and fix all ruff issues automatically: ```bash rye run format ``` ## Publishing and releases Changes made to this repository via the automated release PR pipeline should publish to PyPI automatically. If the changes aren't made through the automated pipeline, you may want to make releases manually. ### Publish with a GitHub workflow You can release to package managers by using [the `Publish PyPI` GitHub action](https://www.github.com/openai/openai-python/actions/workflows/publish-pypi.yml). This will require a setup organization or repository secret to be set up. ### Publish manually If you need to manually release a package, you can run the `bin/publish-pypi` script with an `PYPI_TOKEN` set on the environment. openai-python-1.12.0/LICENSE000066400000000000000000000261101456126711000153750ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2024 OpenAI Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
openai-python-1.12.0/README.md000066400000000000000000000414621456126711000156560ustar00rootroot00000000000000# OpenAI Python API library [![PyPI version](https://img.shields.io/pypi/v/openai.svg)](https://pypi.org/project/openai/) The OpenAI Python library provides convenient access to the OpenAI REST API from any Python 3.7+ application. The library includes type definitions for all request params and response fields, and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx). It is generated from our [OpenAPI specification](https://github.com/openai/openai-openapi) with [Stainless](https://stainlessapi.com/). ## Documentation The REST API documentation can be found [on platform.openai.com](https://platform.openai.com/docs). The full API of this library can be found in [api.md](api.md). ## Installation > [!IMPORTANT] > The SDK was rewritten in v1, which was released November 6th 2023. See the [v1 migration guide](https://github.com/openai/openai-python/discussions/742), which includes scripts to automatically update your code. ```sh pip install openai ``` ## Usage The full API of this library can be found in [api.md](api.md). ```python import os from openai import OpenAI client = OpenAI( # This is the default and can be omitted api_key=os.environ.get("OPENAI_API_KEY"), ) chat_completion = client.chat.completions.create( messages=[ { "role": "user", "content": "Say this is a test", } ], model="gpt-3.5-turbo", ) ``` While you can provide an `api_key` keyword argument, we recommend using [python-dotenv](https://pypi.org/project/python-dotenv/) to add `OPENAI_API_KEY="My API Key"` to your `.env` file so that your API Key is not stored in source control. ## Async usage Simply import `AsyncOpenAI` instead of `OpenAI` and use `await` with each API call: ```python import os import asyncio from openai import AsyncOpenAI client = AsyncOpenAI( # This is the default and can be omitted api_key=os.environ.get("OPENAI_API_KEY"), ) async def main() -> None: chat_completion = await client.chat.completions.create( messages=[ { "role": "user", "content": "Say this is a test", } ], model="gpt-3.5-turbo", ) asyncio.run(main()) ``` Functionality between the synchronous and asynchronous clients is otherwise identical. ## Streaming Responses We provide support for streaming responses using Server Side Events (SSE). ```python from openai import OpenAI client = OpenAI() stream = client.chat.completions.create( model="gpt-4", messages=[{"role": "user", "content": "Say this is a test"}], stream=True, ) for chunk in stream: print(chunk.choices[0].delta.content or "", end="") ``` The async client uses the exact same interface. ```python from openai import AsyncOpenAI client = AsyncOpenAI() async def main(): stream = await client.chat.completions.create( model="gpt-4", messages=[{"role": "user", "content": "Say this is a test"}], stream=True, ) async for chunk in stream: print(chunk.choices[0].delta.content or "", end="") asyncio.run(main()) ``` ## Module-level client > [!IMPORTANT] > We highly recommend instantiating client instances instead of relying on the global client. We also expose a global client instance that is accessible in a similar fashion to versions prior to v1. ```py import openai # optional; defaults to `os.environ['OPENAI_API_KEY']` openai.api_key = '...' # all client options can be configured just like the `OpenAI` instantiation counterpart openai.base_url = "https://..." 
openai.default_headers = {"x-foo": "true"}

completion = openai.chat.completions.create(
    model="gpt-4",
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.choices[0].message.content)
```

The API is exactly the same as the standard client instance-based API.

This is intended to be used within REPLs or notebooks for faster iteration, **not** in application code.

We recommend that you always instantiate a client (e.g., with `client = OpenAI()`) in application code because:

- It can be difficult to reason about where client options are configured
- It's not possible to change certain client options without potentially causing race conditions
- It's harder to mock for testing purposes
- It's not possible to control cleanup of network connections

## Using types

Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typing.html#typing.TypedDict). Responses are [Pydantic models](https://docs.pydantic.dev), which provide helper methods for things like:

- Serializing back into JSON, `model.model_dump_json(indent=2, exclude_unset=True)`
- Converting to a dictionary, `model.model_dump(exclude_unset=True)`

Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.

## Pagination

List methods in the OpenAI API are paginated. This library provides auto-paginating iterators with each list response, so you do not have to request successive pages manually:

```python
from openai import OpenAI

client = OpenAI()

all_jobs = []
# Automatically fetches more pages as needed.
for job in client.fine_tuning.jobs.list(
    limit=20,
):
    # Do something with job here
    all_jobs.append(job)
print(all_jobs)
```

Or, asynchronously:

```python
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI()


async def main() -> None:
    all_jobs = []
    # Iterate through items across all pages, issuing requests as needed.
    async for job in client.fine_tuning.jobs.list(
        limit=20,
    ):
        all_jobs.append(job)
    print(all_jobs)


asyncio.run(main())
```

Alternatively, you can use the `.has_next_page()`, `.next_page_info()`, or `.get_next_page()` methods for more granular control when working with pages:

```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)
if first_page.has_next_page():
    print(f"will fetch next page using these details: {first_page.next_page_info()}")
    next_page = await first_page.get_next_page()
    print(f"number of items we just fetched: {len(next_page.data)}")

# Remove `await` for non-async usage.
```

Or just work directly with the returned data:

```python
first_page = await client.fine_tuning.jobs.list(
    limit=20,
)

print(f"next page cursor: {first_page.after}")  # => "next page cursor: ..."
for job in first_page.data:
    print(job.id)

# Remove `await` for non-async usage.
```

## Nested params

Nested parameters are dictionaries, typed using `TypedDict`, for example:

```python
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "Can you generate an example json object describing a fruit?",
        }
    ],
    model="gpt-3.5-turbo-1106",
    response_format={"type": "json_object"},
)
```

## File Uploads

Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, or a tuple of `(filename, contents, media type)`.
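For instance, if the contents are already in memory, the tuple form could look roughly like the sketch below; the filename, payload, and media type shown here are made up for illustration.

```python
from openai import OpenAI

client = OpenAI()

# (filename, contents, media type): useful when the data does not live on disk.
contents = b'{"prompt": "hello", "completion": "world"}\n'
client.files.create(
    file=("training.jsonl", contents, "application/jsonl"),  # illustrative values
    purpose="fine-tune",
)
```

Passing a `PathLike` instance instead, as in the example below, lets the library read the file for you: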
```python
from pathlib import Path
from openai import OpenAI

client = OpenAI()

client.files.create(
    file=Path("input.jsonl"),
    purpose="fine-tune",
)
```

The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.

## Handling errors

When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `openai.APIConnectionError` is raised.

When the API returns a non-success status code (that is, a 4xx or 5xx response), a subclass of `openai.APIStatusError` is raised, containing `status_code` and `response` properties.

All errors inherit from `openai.APIError`.

```python
import openai
from openai import OpenAI

client = OpenAI()

try:
    client.fine_tuning.jobs.create(
        model="gpt-3.5-turbo",
        training_file="file-abc123",
    )
except openai.APIConnectionError as e:
    print("The server could not be reached")
    print(e.__cause__)  # an underlying Exception, likely raised within httpx.
except openai.RateLimitError as e:
    print("A 429 status code was received; we should back off a bit.")
except openai.APIStatusError as e:
    print("Another non-200-range status code was received")
    print(e.status_code)
    print(e.response)
```

Error codes are as follows:

| Status Code | Error Type                 |
| ----------- | -------------------------- |
| 400         | `BadRequestError`          |
| 401         | `AuthenticationError`      |
| 403         | `PermissionDeniedError`    |
| 404         | `NotFoundError`            |
| 422         | `UnprocessableEntityError` |
| 429         | `RateLimitError`           |
| >=500       | `InternalServerError`      |
| N/A         | `APIConnectionError`       |

### Retries

Certain errors are automatically retried 2 times by default, with a short exponential backoff. Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict, 429 Rate Limit, and >=500 Internal errors are all retried by default.

You can use the `max_retries` option to configure or disable retry settings:

```python
from openai import OpenAI

# Configure the default for all requests:
client = OpenAI(
    # default is 2
    max_retries=0,
)

# Or, configure per-request:
client.with_options(max_retries=5).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I get the name of the current day in Node.js?",
        }
    ],
    model="gpt-3.5-turbo",
)
```

### Timeouts

By default, requests time out after 10 minutes. You can configure this with a `timeout` option, which accepts a float (in seconds) or an [`httpx.Timeout`](https://www.python-httpx.org/advanced/#fine-tuning-the-configuration) object:

```python
import httpx
from openai import OpenAI

# Configure the default for all requests:
client = OpenAI(
    # 20 seconds (default is 10 minutes)
    timeout=20.0,
)

# More granular control:
client = OpenAI(
    timeout=httpx.Timeout(60.0, read=5.0, write=10.0, connect=2.0),
)

# Override per-request:
client.with_options(timeout=5.0).chat.completions.create(
    messages=[
        {
            "role": "user",
            "content": "How can I list all files in a directory using Python?",
        }
    ],
    model="gpt-3.5-turbo",
)
```

On timeout, an `APITimeoutError` is raised.

Note that requests that time out are [retried twice by default](#retries).

## Advanced

### Logging

We use the standard library [`logging`](https://docs.python.org/3/library/logging.html) module.

You can enable logging by setting the environment variable `OPENAI_LOG` to `debug`.
```shell
$ export OPENAI_LOG=debug
```

### How to tell whether `None` means `null` or missing

In an API response, a field may be explicitly `null`, or missing entirely; in either case, its value is `None` in this library. You can differentiate the two cases with `.model_fields_set`:

```py
if response.my_field is None:
    if 'my_field' not in response.model_fields_set:
        print('Got json like {}, without a "my_field" key present at all.')
    else:
        print('Got json like {"my_field": null}.')
```

### Accessing raw response data (e.g. headers)

The "raw" Response object can be accessed by prefixing `.with_raw_response.` to any HTTP method call, e.g.,

```py
from openai import OpenAI

client = OpenAI()

response = client.chat.completions.with_raw_response.create(
    messages=[{
        "role": "user",
        "content": "Say this is a test",
    }],
    model="gpt-3.5-turbo",
)
print(response.headers.get('X-My-Header'))

completion = response.parse()  # get the object that `chat.completions.create()` would have returned
print(completion)
```

These methods return a [`LegacyAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_legacy_response.py) object. This is a legacy class, as we're changing it slightly in the next major version.

For the sync client this will mostly be the same, with the exception that `content` & `text` will be methods instead of properties. In the async client, all methods will be async.

A migration script will be provided & the migration in general should be smooth.

#### `.with_streaming_response`

The above interface eagerly reads the full response body when you make the request, which may not always be what you want.

To stream the response body, use `.with_streaming_response` instead, which requires a context manager and only reads the response body once you call `.read()`, `.text()`, `.json()`, `.iter_bytes()`, `.iter_text()`, `.iter_lines()`, or `.parse()`. In the async client, these are async methods.

As such, `.with_streaming_response` methods return a different [`APIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object, and the async client returns an [`AsyncAPIResponse`](https://github.com/openai/openai-python/tree/main/src/openai/_response.py) object.

```python
with client.chat.completions.with_streaming_response.create(
    messages=[
        {
            "role": "user",
            "content": "Say this is a test",
        }
    ],
    model="gpt-3.5-turbo",
) as response:
    print(response.headers.get("X-My-Header"))

    for line in response.iter_lines():
        print(line)
```

The context manager is required so that the response will reliably be closed.

### Configuring the HTTP client

You can directly override the [httpx client](https://www.python-httpx.org/api/#client) to customize it for your use case, including:

- Support for proxies
- Custom transports
- Additional [advanced](https://www.python-httpx.org/advanced/#client-instances) functionality

```python
import httpx
from openai import OpenAI

client = OpenAI(
    # Or use the `OPENAI_BASE_URL` env var
    base_url="http://my.test.server.example.com:8083",
    http_client=httpx.Client(
        proxies="http://my.test.proxy.example.com",
        transport=httpx.HTTPTransport(local_address="0.0.0.0"),
    ),
)
```

### Managing HTTP resources

By default the library closes underlying HTTP connections whenever the client is [garbage collected](https://docs.python.org/3/reference/datamodel.html#object.__del__). You can manually close the client using the `.close()` method if desired, or with a context manager that closes when exiting.
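For example, a minimal sketch of both approaches (assuming `OPENAI_API_KEY` is set in the environment) could look like this:

```python
from openai import OpenAI

# Option 1: close the client explicitly once you are done with it.
client = OpenAI()
try:
    client.chat.completions.create(
        messages=[{"role": "user", "content": "Say this is a test"}],
        model="gpt-3.5-turbo",
    )
finally:
    client.close()  # releases the underlying httpx connection pool

# Option 2: use the client as a context manager so it is closed automatically.
with OpenAI() as client:
    client.chat.completions.create(
        messages=[{"role": "user", "content": "Say this is a test"}],
        model="gpt-3.5-turbo",
    )
```

The async client follows the same pattern with `await client.close()` or `async with`.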
## Microsoft Azure OpenAI

To use this library with [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/overview), use the `AzureOpenAI` class instead of the `OpenAI` class.

> [!IMPORTANT]
> The Azure API shape differs from the core API shape, which means that the static types for responses / params
> won't always be correct.

```py
from openai import AzureOpenAI

# gets the API Key from environment variable AZURE_OPENAI_API_KEY
client = AzureOpenAI(
    # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
    api_version="2023-07-01-preview",
    # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
    azure_endpoint="https://example-endpoint.openai.azure.com",
)

completion = client.chat.completions.create(
    model="deployment-name",  # e.g. gpt-35-instant
    messages=[
        {
            "role": "user",
            "content": "How do I output all files in a directory using Python?",
        },
    ],
)
print(completion.model_dump_json(indent=2))
```

In addition to the options provided in the base `OpenAI` client, the following options are provided:

- `azure_endpoint` (or the `AZURE_OPENAI_ENDPOINT` environment variable)
- `azure_deployment`
- `api_version` (or the `OPENAI_API_VERSION` environment variable)
- `azure_ad_token` (or the `AZURE_OPENAI_AD_TOKEN` environment variable)
- `azure_ad_token_provider`

An example of using the client with Azure Active Directory can be found [here](https://github.com/openai/openai-python/blob/main/examples/azure_ad.py).

## Versioning

This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:

1. Changes that only affect static types, without breaking runtime behavior.
2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_.
3. Changes that we do not expect to impact the vast majority of users in practice.

We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.

We are keen for your feedback; please open an [issue](https://www.github.com/openai/openai-python/issues) with questions, bugs, or suggestions.

## Requirements

Python 3.7 or higher.
openai-python-1.12.0/api.md000066400000000000000000000411711456126711000154670ustar00rootroot00000000000000# Shared Types ```python from openai.types import FunctionDefinition, FunctionParameters ``` # Completions Types: ```python from openai.types import Completion, CompletionChoice, CompletionUsage ``` Methods: - client.completions.create(\*\*params) -> Completion # Chat ## Completions Types: ```python from openai.types.chat import ( ChatCompletion, ChatCompletionAssistantMessageParam, ChatCompletionChunk, ChatCompletionContentPart, ChatCompletionContentPartImage, ChatCompletionContentPartText, ChatCompletionFunctionCallOption, ChatCompletionFunctionMessageParam, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionMessageToolCall, ChatCompletionNamedToolChoice, ChatCompletionRole, ChatCompletionSystemMessageParam, ChatCompletionTokenLogprob, ChatCompletionTool, ChatCompletionToolChoiceOption, ChatCompletionToolMessageParam, ChatCompletionUserMessageParam, ) ``` Methods: - client.chat.completions.create(\*\*params) -> ChatCompletion # Embeddings Types: ```python from openai.types import CreateEmbeddingResponse, Embedding ``` Methods: - client.embeddings.create(\*\*params) -> CreateEmbeddingResponse # Files Types: ```python from openai.types import FileContent, FileDeleted, FileObject ``` Methods: - client.files.create(\*\*params) -> FileObject - client.files.retrieve(file_id) -> FileObject - client.files.list(\*\*params) -> SyncPage[FileObject] - client.files.delete(file_id) -> FileDeleted - client.files.content(file_id) -> HttpxBinaryResponseContent - client.files.retrieve_content(file_id) -> str - client.files.wait_for_processing(\*args) -> FileObject # Images Types: ```python from openai.types import Image, ImagesResponse ``` Methods: - client.images.create_variation(\*\*params) -> ImagesResponse - client.images.edit(\*\*params) -> ImagesResponse - client.images.generate(\*\*params) -> ImagesResponse # Audio ## Transcriptions Types: ```python from openai.types.audio import Transcription ``` Methods: - client.audio.transcriptions.create(\*\*params) -> Transcription ## Translations Types: ```python from openai.types.audio import Translation ``` Methods: - client.audio.translations.create(\*\*params) -> Translation ## Speech Methods: - client.audio.speech.create(\*\*params) -> HttpxBinaryResponseContent # Moderations Types: ```python from openai.types import Moderation, ModerationCreateResponse ``` Methods: - client.moderations.create(\*\*params) -> ModerationCreateResponse # Models Types: ```python from openai.types import Model, ModelDeleted ``` Methods: - client.models.retrieve(model) -> Model - client.models.list() -> SyncPage[Model] - client.models.delete(model) -> ModelDeleted # FineTuning ## Jobs Types: ```python from openai.types.fine_tuning import FineTuningJob, FineTuningJobEvent ``` Methods: - client.fine_tuning.jobs.create(\*\*params) -> FineTuningJob - client.fine_tuning.jobs.retrieve(fine_tuning_job_id) -> FineTuningJob - client.fine_tuning.jobs.list(\*\*params) -> SyncCursorPage[FineTuningJob] - client.fine_tuning.jobs.cancel(fine_tuning_job_id) -> FineTuningJob - client.fine_tuning.jobs.list_events(fine_tuning_job_id, \*\*params) -> SyncCursorPage[FineTuningJobEvent] # Beta ## Assistants Types: ```python from openai.types.beta import Assistant, AssistantDeleted ``` Methods: - client.beta.assistants.create(\*\*params) -> Assistant - client.beta.assistants.retrieve(assistant_id) -> Assistant - client.beta.assistants.update(assistant_id, \*\*params) -> Assistant 
- client.beta.assistants.list(\*\*params) -> SyncCursorPage[Assistant] - client.beta.assistants.delete(assistant_id) -> AssistantDeleted ### Files Types: ```python from openai.types.beta.assistants import AssistantFile, FileDeleteResponse ``` Methods: - client.beta.assistants.files.create(assistant_id, \*\*params) -> AssistantFile - client.beta.assistants.files.retrieve(file_id, \*, assistant_id) -> AssistantFile - client.beta.assistants.files.list(assistant_id, \*\*params) -> SyncCursorPage[AssistantFile] - client.beta.assistants.files.delete(file_id, \*, assistant_id) -> FileDeleteResponse ## Threads Types: ```python from openai.types.beta import Thread, ThreadDeleted ``` Methods: - client.beta.threads.create(\*\*params) -> Thread - client.beta.threads.retrieve(thread_id) -> Thread - client.beta.threads.update(thread_id, \*\*params) -> Thread - client.beta.threads.delete(thread_id) -> ThreadDeleted - client.beta.threads.create_and_run(\*\*params) -> Run ### Runs Types: ```python from openai.types.beta.threads import RequiredActionFunctionToolCall, Run ``` Methods: - client.beta.threads.runs.create(thread_id, \*\*params) -> Run - client.beta.threads.runs.retrieve(run_id, \*, thread_id) -> Run - client.beta.threads.runs.update(run_id, \*, thread_id, \*\*params) -> Run - client.beta.threads.runs.list(thread_id, \*\*params) -> SyncCursorPage[Run] - client.beta.threads.runs.cancel(run_id, \*, thread_id) -> Run - client.beta.threads.runs.submit_tool_outputs(run_id, \*, thread_id, \*\*params) -> Run #### Steps Types: ```python from openai.types.beta.threads.runs import ( CodeToolCall, FunctionToolCall, MessageCreationStepDetails, RetrievalToolCall, RunStep, ToolCallsStepDetails, ) ``` Methods: - client.beta.threads.runs.steps.retrieve(step_id, \*, thread_id, run_id) -> RunStep - client.beta.threads.runs.steps.list(run_id, \*, thread_id, \*\*params) -> SyncCursorPage[RunStep] ### Messages Types: ```python from openai.types.beta.threads import ( MessageContentImageFile, MessageContentText, ThreadMessage, ThreadMessageDeleted, ) ``` Methods: - client.beta.threads.messages.create(thread_id, \*\*params) -> ThreadMessage - client.beta.threads.messages.retrieve(message_id, \*, thread_id) -> ThreadMessage - client.beta.threads.messages.update(message_id, \*, thread_id, \*\*params) -> ThreadMessage - client.beta.threads.messages.list(thread_id, \*\*params) -> SyncCursorPage[ThreadMessage] #### Files Types: ```python from openai.types.beta.threads.messages import MessageFile ``` Methods: - client.beta.threads.messages.files.retrieve(file_id, \*, thread_id, message_id) -> MessageFile - client.beta.threads.messages.files.list(message_id, \*, thread_id, \*\*params) -> SyncCursorPage[MessageFile] openai-python-1.12.0/bin/000077500000000000000000000000001456126711000151405ustar00rootroot00000000000000openai-python-1.12.0/bin/check-env-state.py000066400000000000000000000016341456126711000204770ustar00rootroot00000000000000"""Script that exits 1 if the current environment is not in sync with the `requirements-dev.lock` file. 
""" from pathlib import Path import importlib_metadata def should_run_sync() -> bool: dev_lock = Path(__file__).parent.parent.joinpath("requirements-dev.lock") for line in dev_lock.read_text().splitlines(): if not line or line.startswith("#") or line.startswith("-e"): continue dep, lock_version = line.split("==") try: version = importlib_metadata.version(dep) if lock_version != version: print(f"mismatch for {dep} current={version} lock={lock_version}") return True except Exception: print(f"could not import {dep}") return True return False def main() -> None: if should_run_sync(): exit(1) else: exit(0) if __name__ == "__main__": main() openai-python-1.12.0/bin/check-release-environment000066400000000000000000000012061456126711000221170ustar00rootroot00000000000000#!/usr/bin/env bash errors=() if [ -z "${STAINLESS_API_KEY}" ]; then errors+=("The STAINLESS_API_KEY secret has not been set. Please contact Stainless for an API key & set it in your organization secrets on GitHub.") fi if [ -z "${PYPI_TOKEN}" ]; then errors+=("The OPENAI_PYPI_TOKEN secret has not been set. Please set it in either this repository's secrets or your organization secrets.") fi len=${#errors[@]} if [[ len -gt 0 ]]; then echo -e "Found the following errors in the release environment:\n" for error in "${errors[@]}"; do echo -e "- $error\n" done exit 1 fi echo "The environment is ready to push releases!" openai-python-1.12.0/bin/check-test-server000077500000000000000000000024501456126711000204250ustar00rootroot00000000000000#!/usr/bin/env bash RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[0;33m' NC='\033[0m' # No Color function prism_is_running() { curl --silent "http://localhost:4010" >/dev/null 2>&1 } function is_overriding_api_base_url() { [ -n "$TEST_API_BASE_URL" ] } if is_overriding_api_base_url ; then # If someone is running the tests against the live API, we can trust they know # what they're doing and exit early. echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" exit 0 elif prism_is_running ; then echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" echo exit 0 else echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" echo -e "running against your OpenAPI spec." echo echo -e "${YELLOW}To fix:${NC}" echo echo -e "1. Install Prism (requires Node 16+):" echo echo -e " With npm:" echo -e " \$ ${YELLOW}npm install -g @stoplight/prism-cli${NC}" echo echo -e " With yarn:" echo -e " \$ ${YELLOW}yarn global add @stoplight/prism-cli${NC}" echo echo -e "2. 
Run the mock server" echo echo -e " To run the server, pass in the path of your OpenAPI" echo -e " spec to the prism command:" echo echo -e " \$ ${YELLOW}prism mock path/to/your.openapi.yml${NC}" echo exit 1 fi openai-python-1.12.0/bin/publish-pypi000066400000000000000000000001441456126711000175070ustar00rootroot00000000000000#!/usr/bin/env bash set -eux mkdir -p dist rye build --clean rye publish --yes --token=$PYPI_TOKEN openai-python-1.12.0/bin/ruffen-docs.py000066400000000000000000000121401456126711000177230ustar00rootroot00000000000000# fork of https://github.com/asottile/blacken-docs adapted for ruff from __future__ import annotations import re import sys import argparse import textwrap import contextlib import subprocess from typing import Match, Optional, Sequence, Generator, NamedTuple, cast MD_RE = re.compile( r"(?P^(?P *)```\s*python\n)" r"(?P.*?)" r"(?P^(?P=indent)```\s*$)", re.DOTALL | re.MULTILINE, ) MD_PYCON_RE = re.compile( r"(?P^(?P *)```\s*pycon\n)" r"(?P.*?)" r"(?P^(?P=indent)```.*$)", re.DOTALL | re.MULTILINE, ) PYCON_PREFIX = ">>> " PYCON_CONTINUATION_PREFIX = "..." PYCON_CONTINUATION_RE = re.compile( rf"^{re.escape(PYCON_CONTINUATION_PREFIX)}( |$)", ) DEFAULT_LINE_LENGTH = 100 class CodeBlockError(NamedTuple): offset: int exc: Exception def format_str( src: str, ) -> tuple[str, Sequence[CodeBlockError]]: errors: list[CodeBlockError] = [] @contextlib.contextmanager def _collect_error(match: Match[str]) -> Generator[None, None, None]: try: yield except Exception as e: errors.append(CodeBlockError(match.start(), e)) def _md_match(match: Match[str]) -> str: code = textwrap.dedent(match["code"]) with _collect_error(match): code = format_code_block(code) code = textwrap.indent(code, match["indent"]) return f'{match["before"]}{code}{match["after"]}' def _pycon_match(match: Match[str]) -> str: code = "" fragment = cast(Optional[str], None) def finish_fragment() -> None: nonlocal code nonlocal fragment if fragment is not None: with _collect_error(match): fragment = format_code_block(fragment) fragment_lines = fragment.splitlines() code += f"{PYCON_PREFIX}{fragment_lines[0]}\n" for line in fragment_lines[1:]: # Skip blank lines to handle Black adding a blank above # functions within blocks. A blank line would end the REPL # continuation prompt. # # >>> if True: # ... def f(): # ... pass # ... 
if line: code += f"{PYCON_CONTINUATION_PREFIX} {line}\n" if fragment_lines[-1].startswith(" "): code += f"{PYCON_CONTINUATION_PREFIX}\n" fragment = None indentation = None for line in match["code"].splitlines(): orig_line, line = line, line.lstrip() if indentation is None and line: indentation = len(orig_line) - len(line) continuation_match = PYCON_CONTINUATION_RE.match(line) if continuation_match and fragment is not None: fragment += line[continuation_match.end() :] + "\n" else: finish_fragment() if line.startswith(PYCON_PREFIX): fragment = line[len(PYCON_PREFIX) :] + "\n" else: code += orig_line[indentation:] + "\n" finish_fragment() return code def _md_pycon_match(match: Match[str]) -> str: code = _pycon_match(match) code = textwrap.indent(code, match["indent"]) return f'{match["before"]}{code}{match["after"]}' src = MD_RE.sub(_md_match, src) src = MD_PYCON_RE.sub(_md_pycon_match, src) return src, errors def format_code_block(code: str) -> str: return subprocess.check_output( [ sys.executable, "-m", "ruff", "format", "--stdin-filename=script.py", f"--line-length={DEFAULT_LINE_LENGTH}", ], encoding="utf-8", input=code, ) def format_file( filename: str, skip_errors: bool, ) -> int: with open(filename, encoding="UTF-8") as f: contents = f.read() new_contents, errors = format_str(contents) for error in errors: lineno = contents[: error.offset].count("\n") + 1 print(f"{filename}:{lineno}: code block parse error {error.exc}") if errors and not skip_errors: return 1 if contents != new_contents: print(f"{filename}: Rewriting...") with open(filename, "w", encoding="UTF-8") as f: f.write(new_contents) return 0 else: return 0 def main(argv: Sequence[str] | None = None) -> int: parser = argparse.ArgumentParser() parser.add_argument( "-l", "--line-length", type=int, default=DEFAULT_LINE_LENGTH, ) parser.add_argument( "-S", "--skip-string-normalization", action="store_true", ) parser.add_argument("-E", "--skip-errors", action="store_true") parser.add_argument("filenames", nargs="*") args = parser.parse_args(argv) retv = 0 for filename in args.filenames: retv |= format_file(filename, skip_errors=args.skip_errors) return retv if __name__ == "__main__": raise SystemExit(main()) openai-python-1.12.0/bin/test000077500000000000000000000001021456126711000160360ustar00rootroot00000000000000#!/usr/bin/env bash bin/check-test-server && rye run pytest "$@" openai-python-1.12.0/examples/000077500000000000000000000000001456126711000162065ustar00rootroot00000000000000openai-python-1.12.0/examples/.keep000066400000000000000000000003571456126711000171400ustar00rootroot00000000000000File generated from our OpenAPI spec by Stainless. This directory can be used to store example files demonstrating usage of this SDK. It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.openai-python-1.12.0/examples/assistant.py000066400000000000000000000024701456126711000205740ustar00rootroot00000000000000import time import openai # gets API Key from environment variable OPENAI_API_KEY client = openai.OpenAI() assistant = client.beta.assistants.create( name="Math Tutor", instructions="You are a personal math tutor. Write and run code to answer math questions.", tools=[{"type": "code_interpreter"}], model="gpt-4-1106-preview", ) thread = client.beta.threads.create() message = client.beta.threads.messages.create( thread_id=thread.id, role="user", content="I need to solve the equation `3x + 11 = 14`. 
Can you help me?", ) run = client.beta.threads.runs.create( thread_id=thread.id, assistant_id=assistant.id, instructions="Please address the user as Jane Doe. The user has a premium account.", ) print("checking assistant status. ") while True: run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id) if run.status == "completed": print("done!") messages = client.beta.threads.messages.list(thread_id=thread.id) print("messages: ") for message in messages: assert message.content[0].type == "text" print({"role": message.role, "message": message.content[0].text.value}) client.beta.assistants.delete(assistant.id) break else: print("in progress...") time.sleep(5) openai-python-1.12.0/examples/async_demo.py000077500000000000000000000007161456126711000207100ustar00rootroot00000000000000#!/usr/bin/env -S poetry run python import asyncio from openai import AsyncOpenAI # gets API Key from environment variable OPENAI_API_KEY client = AsyncOpenAI() async def main() -> None: stream = await client.completions.create( model="gpt-3.5-turbo-instruct", prompt="Say this is a test", stream=True, ) async for completion in stream: print(completion.choices[0].text, end="") print() asyncio.run(main()) openai-python-1.12.0/examples/audio.py000077500000000000000000000016351456126711000176710ustar00rootroot00000000000000#!/usr/bin/env python from pathlib import Path from openai import OpenAI # gets OPENAI_API_KEY from your environment variables openai = OpenAI() speech_file_path = Path(__file__).parent / "speech.mp3" def main() -> None: # Create text-to-speech audio file with openai.audio.speech.with_streaming_response.create( model="tts-1", voice="alloy", input="the quick brown fox jumped over the lazy dogs", ) as response: response.stream_to_file(speech_file_path) # Create transcription from audio file transcription = openai.audio.transcriptions.create( model="whisper-1", file=speech_file_path, ) print(transcription.text) # Create translation from audio file translation = openai.audio.translations.create( model="whisper-1", file=speech_file_path, ) print(translation.text) if __name__ == "__main__": main() openai-python-1.12.0/examples/azure.py000077500000000000000000000026641456126711000177210ustar00rootroot00000000000000from openai import AzureOpenAI # may change in the future # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning api_version = "2023-07-01-preview" # gets the API Key from environment variable AZURE_OPENAI_API_KEY client = AzureOpenAI( api_version=api_version, # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource azure_endpoint="https://example-endpoint.openai.azure.com", ) completion = client.chat.completions.create( model="deployment-name", # e.g. gpt-35-instant messages=[ { "role": "user", "content": "How do I output all files in a directory using Python?", }, ], ) print(completion.model_dump_json(indent=2)) deployment_client = AzureOpenAI( api_version=api_version, # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource azure_endpoint="https://example-resource.azure.openai.com/", # Navigate to the Azure OpenAI Studio to deploy a model. azure_deployment="deployment-name", # e.g. 
gpt-35-instant ) completion = deployment_client.chat.completions.create( model="", messages=[ { "role": "user", "content": "How do I output all files in a directory using Python?", }, ], ) print(completion.model_dump_json(indent=2)) openai-python-1.12.0/examples/azure_ad.py000077500000000000000000000017271456126711000203640ustar00rootroot00000000000000from azure.identity import DefaultAzureCredential, get_bearer_token_provider from openai import AzureOpenAI token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") # may change in the future # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning api_version = "2023-07-01-preview" # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource endpoint = "https://my-resource.openai.azure.com" client = AzureOpenAI( api_version=api_version, azure_endpoint=endpoint, azure_ad_token_provider=token_provider, ) completion = client.chat.completions.create( model="deployment-name", # e.g. gpt-35-instant messages=[ { "role": "user", "content": "How do I output all files in a directory using Python?", }, ], ) print(completion.model_dump_json(indent=2)) openai-python-1.12.0/examples/demo.py000077500000000000000000000014741456126711000175150ustar00rootroot00000000000000#!/usr/bin/env -S poetry run python from openai import OpenAI # gets API Key from environment variable OPENAI_API_KEY client = OpenAI() # Non-streaming: print("----- standard request -----") completion = client.chat.completions.create( model="gpt-4", messages=[ { "role": "user", "content": "Say this is a test", }, ], ) print(completion.choices[0].message.content) # Streaming: print("----- streaming request -----") stream = client.chat.completions.create( model="gpt-4", messages=[ { "role": "user", "content": "How do I output all files in a directory using Python?", }, ], stream=True, ) for chunk in stream: if not chunk.choices: continue print(chunk.choices[0].delta.content, end="") print() openai-python-1.12.0/examples/module_client.py000077500000000000000000000012231456126711000214040ustar00rootroot00000000000000import openai # will default to `os.environ['OPENAI_API_KEY']` if not explicitly set openai.api_key = "..." # all client options can be configured just like the `OpenAI` instantiation counterpart openai.base_url = "https://..." 
openai.default_headers = {"x-foo": "true"} # all API calls work in the exact same fashion as well stream = openai.chat.completions.create( model="gpt-4", messages=[ { "role": "user", "content": "How do I output all files in a directory using Python?", }, ], stream=True, ) for chunk in stream: print(chunk.choices[0].delta.content or "", end="", flush=True) print() openai-python-1.12.0/examples/picture.py000066400000000000000000000007161456126711000202370ustar00rootroot00000000000000#!/usr/bin/env python from openai import OpenAI # gets OPENAI_API_KEY from your environment variables openai = OpenAI() prompt = "An astronaut lounging in a tropical resort in space, pixel art" model = "dall-e-3" def main() -> None: # Generate an image based on the prompt response = openai.images.generate(prompt=prompt, model=model) # Prints response containing a URL link to image print(response) if __name__ == "__main__": main() openai-python-1.12.0/examples/streaming.py000077500000000000000000000031521456126711000205550ustar00rootroot00000000000000#!/usr/bin/env -S poetry run python import asyncio from openai import OpenAI, AsyncOpenAI # This script assumes you have the OPENAI_API_KEY environment variable set to a valid OpenAI API key. # # You can run this script from the root directory like so: # `python examples/streaming.py` def sync_main() -> None: client = OpenAI() response = client.completions.create( model="gpt-3.5-turbo-instruct", prompt="1,2,3,", max_tokens=5, temperature=0, stream=True, ) # You can manually control iteration over the response first = next(response) print(f"got response data: {first.model_dump_json(indent=2)}") # Or you could automatically iterate through all of data. # Note that the for loop will not exit until *all* of the data has been processed. for data in response: print(data.model_dump_json()) async def async_main() -> None: client = AsyncOpenAI() response = await client.completions.create( model="gpt-3.5-turbo-instruct", prompt="1,2,3,", max_tokens=5, temperature=0, stream=True, ) # You can manually control iteration over the response. # In Python 3.10+ you can also use the `await anext(response)` builtin instead first = await response.__anext__() print(f"got response data: {first.model_dump_json(indent=2)}") # Or you could automatically iterate through all of data. # Note that the for loop will not exit until *all* of the data has been processed. async for data in response: print(data.model_dump_json()) sync_main() asyncio.run(async_main()) openai-python-1.12.0/mypy.ini000066400000000000000000000023631456126711000160730ustar00rootroot00000000000000[mypy] pretty = True show_error_codes = True # Exclude _files.py because mypy isn't smart enough to apply # the correct type narrowing and as this is an internal module # it's fine to just use Pyright. exclude = ^(src/openai/_files\.py|_dev/.*\.py)$ strict_equality = True implicit_reexport = True check_untyped_defs = True no_implicit_optional = True warn_return_any = True warn_unreachable = True warn_unused_configs = True # Turn these options off as it could cause conflicts # with the Pyright options. warn_unused_ignores = False warn_redundant_casts = False disallow_any_generics = True disallow_untyped_defs = True disallow_untyped_calls = True disallow_subclassing_any = True disallow_incomplete_defs = True disallow_untyped_decorators = True cache_fine_grained = True # By default, mypy reports an error if you assign a value to the result # of a function call that doesn't return anything. 
We do this in our test # cases: # ``` # result = ... # assert result is None # ``` # Changing this codegen to make mypy happy would increase complexity # and would not be worth it. disable_error_code = func-returns-value # https://github.com/python/mypy/issues/12162 [mypy.overrides] module = "black.files.*" ignore_errors = true ignore_missing_imports = true openai-python-1.12.0/noxfile.py000066400000000000000000000004471456126711000164130ustar00rootroot00000000000000import nox @nox.session(reuse_venv=True, name="test-pydantic-v1") def test_pydantic_v1(session: nox.Session) -> None: session.install("-r", "requirements-dev.lock") session.install("pydantic<2") session.run("pytest", "--showlocals", "--ignore=tests/functional", *session.posargs) openai-python-1.12.0/pyproject.toml000066400000000000000000000074461456126711000173170ustar00rootroot00000000000000[project] name = "openai" version = "1.12.0" description = "The official Python library for the openai API" readme = "README.md" license = "Apache-2.0" authors = [ { name = "OpenAI", email = "support@openai.com" }, ] dependencies = [ "httpx>=0.23.0, <1", "pydantic>=1.9.0, <3", "typing-extensions>=4.7, <5", "anyio>=3.5.0, <5", "distro>=1.7.0, <2", "sniffio", "cached-property; python_version < '3.8'", "tqdm > 4" ] requires-python = ">= 3.7.1" classifiers = [ "Typing :: Typed", "Intended Audience :: Developers", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Operating System :: OS Independent", "Operating System :: POSIX", "Operating System :: MacOS", "Operating System :: POSIX :: Linux", "Operating System :: Microsoft :: Windows", "Topic :: Software Development :: Libraries :: Python Modules", "License :: OSI Approved :: Apache Software License" ] [project.optional-dependencies] datalib = ["numpy >= 1", "pandas >= 1.2.3", "pandas-stubs >= 1.1.0.11"] [project.urls] Homepage = "https://github.com/openai/openai-python" Repository = "https://github.com/openai/openai-python" [project.scripts] openai = "openai.cli:main" [tool.rye] managed = true # version pins are in requirements-dev.lock dev-dependencies = [ "pyright", "mypy", "respx", "pytest", "pytest-asyncio", "ruff", "time-machine", "nox", "dirty-equals>=0.6.0", "importlib-metadata>=6.7.0", "azure-identity >=1.14.1", "types-tqdm > 4" ] [tool.rye.scripts] format = { chain = [ "format:ruff", "format:docs", "fix:ruff", ]} "format:black" = "black ." "format:docs" = "python bin/ruffen-docs.py README.md api.md" "format:ruff" = "ruff format" "format:isort" = "isort ." "lint" = { chain = [ "check:ruff", "typecheck", ]} "check:ruff" = "ruff ." "fix:ruff" = "ruff --fix ." typecheck = { chain = [ "typecheck:pyright", "typecheck:mypy" ]} "typecheck:pyright" = "pyright" "typecheck:verify-types" = "pyright --verifytypes openai --ignoreexternal" "typecheck:mypy" = "mypy ." [build-system] requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build] include = [ "src/*" ] [tool.hatch.build.targets.wheel] packages = ["src/openai"] [tool.black] line-length = 120 target-version = ["py37"] [tool.pytest.ini_options] testpaths = ["tests"] addopts = "--tb=short" xfail_strict = true asyncio_mode = "auto" filterwarnings = [ "error" ] [tool.pyright] # this enables practically every flag given by pyright. 
# there are a couple of flags that are still disabled by # default in strict mode as they are experimental and niche. typeCheckingMode = "strict" pythonVersion = "3.7" exclude = [ "_dev", ".venv", ".nox", ] reportImplicitOverride = true reportImportCycles = false reportPrivateUsage = false [tool.ruff] line-length = 120 output-format = "grouped" target-version = "py37" select = [ # isort "I", # bugbear rules "B", # remove unused imports "F401", # bare except statements "E722", # unused arguments "ARG", # print statements "T201", "T203", # misuse of typing.TYPE_CHECKING "TCH004" ] ignore = [ # mutable defaults "B006", ] unfixable = [ # disable auto fix for print statements "T201", "T203", ] ignore-init-module-imports = true [tool.ruff.format] docstring-code-format = true [tool.ruff.lint.isort] length-sort = true length-sort-straight = true combine-as-imports = true extra-standard-library = ["typing_extensions"] known-first-party = ["openai", "tests"] [tool.ruff.per-file-ignores] "bin/**.py" = ["T201", "T203"] "tests/**.py" = ["T201", "T203"] "examples/**.py" = ["T201", "T203"] openai-python-1.12.0/release-please-config.json000066400000000000000000000024521456126711000214200ustar00rootroot00000000000000{ "packages": { ".": {} }, "$schema": "https://raw.githubusercontent.com/stainless-api/release-please/main/schemas/config.json", "include-v-in-tag": true, "include-component-in-tag": false, "versioning": "prerelease", "prerelease": true, "bump-minor-pre-major": true, "bump-patch-for-minor-pre-major": false, "pull-request-header": "Automated Release PR", "pull-request-title-pattern": "release: ${version}", "changelog-sections": [ { "type": "feat", "section": "Features" }, { "type": "fix", "section": "Bug Fixes" }, { "type": "perf", "section": "Performance Improvements" }, { "type": "revert", "section": "Reverts" }, { "type": "chore", "section": "Chores" }, { "type": "docs", "section": "Documentation" }, { "type": "style", "section": "Styles" }, { "type": "refactor", "section": "Refactors" }, { "type": "test", "section": "Tests", "hidden": true }, { "type": "build", "section": "Build System" }, { "type": "ci", "section": "Continuous Integration", "hidden": true } ], "release-type": "python", "extra-files": [ "src/openai/_version.py" ] }openai-python-1.12.0/requirements-dev.lock000066400000000000000000000024431456126711000205440ustar00rootroot00000000000000# generated by rye # use `rye lock` or `rye sync` to update this lockfile # # last locked with the following flags: # pre: false # features: [] # all-features: true -e file:. 
annotated-types==0.6.0 anyio==4.1.0 argcomplete==3.1.2 attrs==23.1.0 azure-core==1.29.6 azure-identity==1.15.0 certifi==2023.7.22 cffi==1.16.0 charset-normalizer==3.3.2 colorlog==6.7.0 cryptography==41.0.7 dirty-equals==0.6.0 distlib==0.3.7 distro==1.8.0 exceptiongroup==1.1.3 filelock==3.12.4 h11==0.14.0 httpcore==1.0.2 httpx==0.25.2 idna==3.4 importlib-metadata==7.0.0 iniconfig==2.0.0 msal==1.26.0 msal-extensions==1.1.0 mypy==1.7.1 mypy-extensions==1.0.0 nodeenv==1.8.0 nox==2023.4.22 numpy==1.26.3 packaging==23.2 pandas==2.1.4 pandas-stubs==2.1.4.231227 platformdirs==3.11.0 pluggy==1.3.0 portalocker==2.8.2 py==1.11.0 pycparser==2.21 pydantic==2.4.2 pydantic-core==2.10.1 pyjwt==2.8.0 pyright==1.1.332 pytest==7.1.1 pytest-asyncio==0.21.1 python-dateutil==2.8.2 pytz==2023.3.post1 requests==2.31.0 respx==0.20.2 ruff==0.1.9 six==1.16.0 sniffio==1.3.0 time-machine==2.9.0 tomli==2.0.1 tqdm==4.66.1 types-pytz==2023.3.1.1 types-tqdm==4.66.0.2 typing-extensions==4.8.0 tzdata==2023.4 urllib3==2.1.0 virtualenv==20.24.5 zipp==3.17.0 # The following packages are considered to be unsafe in a requirements file: setuptools==68.2.2 openai-python-1.12.0/requirements.lock000066400000000000000000000010631456126711000177650ustar00rootroot00000000000000# generated by rye # use `rye lock` or `rye sync` to update this lockfile # # last locked with the following flags: # pre: false # features: [] # all-features: true -e file:. annotated-types==0.6.0 anyio==4.1.0 certifi==2023.7.22 distro==1.8.0 exceptiongroup==1.1.3 h11==0.14.0 httpcore==1.0.2 httpx==0.25.2 idna==3.4 numpy==1.26.2 pandas==2.1.3 pandas-stubs==2.1.1.230928 pydantic==2.4.2 pydantic-core==2.10.1 python-dateutil==2.8.2 pytz==2023.3.post1 six==1.16.0 sniffio==1.3.0 tqdm==4.66.1 types-pytz==2023.3.1.1 typing-extensions==4.8.0 tzdata==2023.3 openai-python-1.12.0/src/000077500000000000000000000000001456126711000151575ustar00rootroot00000000000000openai-python-1.12.0/src/openai/000077500000000000000000000000001456126711000164325ustar00rootroot00000000000000openai-python-1.12.0/src/openai/__init__.py000066400000000000000000000216641456126711000205540ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os as _os from typing_extensions import override from . 
import types from ._types import NoneType, Transport, ProxiesTypes from ._utils import file_from_path from ._client import Client, OpenAI, Stream, Timeout, Transport, AsyncClient, AsyncOpenAI, AsyncStream, RequestOptions from ._models import BaseModel from ._version import __title__, __version__ from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse from ._exceptions import ( APIError, OpenAIError, ConflictError, NotFoundError, APIStatusError, RateLimitError, APITimeoutError, BadRequestError, APIConnectionError, AuthenticationError, InternalServerError, PermissionDeniedError, UnprocessableEntityError, APIResponseValidationError, ) from ._utils._logs import setup_logging as _setup_logging __all__ = [ "types", "__version__", "__title__", "NoneType", "Transport", "ProxiesTypes", "OpenAIError", "APIError", "APIStatusError", "APITimeoutError", "APIConnectionError", "APIResponseValidationError", "BadRequestError", "AuthenticationError", "PermissionDeniedError", "NotFoundError", "ConflictError", "UnprocessableEntityError", "RateLimitError", "InternalServerError", "Timeout", "RequestOptions", "Client", "AsyncClient", "Stream", "AsyncStream", "OpenAI", "AsyncOpenAI", "file_from_path", "BaseModel", ] from .lib import azure as _azure from .version import VERSION as VERSION from .lib.azure import AzureOpenAI as AzureOpenAI, AsyncAzureOpenAI as AsyncAzureOpenAI from .lib._old_api import * _setup_logging() # Update the __module__ attribute for exported symbols so that # error messages point to this module instead of the module # it was originally defined in, e.g. # openai._exceptions.NotFoundError -> openai.NotFoundError __locals = locals() for __name in __all__: if not __name.startswith("__"): try: __locals[__name].__module__ = "openai" except (TypeError, AttributeError): # Some of our exported symbols are builtins which we can't set attributes for. 
pass # ------ Module level client ------ import typing as _t import typing_extensions as _te import httpx as _httpx from ._base_client import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES api_key: str | None = None organization: str | None = None base_url: str | _httpx.URL | None = None timeout: float | Timeout | None = DEFAULT_TIMEOUT max_retries: int = DEFAULT_MAX_RETRIES default_headers: _t.Mapping[str, str] | None = None default_query: _t.Mapping[str, object] | None = None http_client: _httpx.Client | None = None _ApiType = _te.Literal["openai", "azure"] api_type: _ApiType | None = _t.cast(_ApiType, _os.environ.get("OPENAI_API_TYPE")) api_version: str | None = _os.environ.get("OPENAI_API_VERSION") azure_endpoint: str | None = _os.environ.get("AZURE_OPENAI_ENDPOINT") azure_ad_token: str | None = _os.environ.get("AZURE_OPENAI_AD_TOKEN") azure_ad_token_provider: _azure.AzureADTokenProvider | None = None class _ModuleClient(OpenAI): # Note: we have to use type: ignores here as overriding class members # with properties is technically unsafe but it is fine for our use case @property # type: ignore @override def api_key(self) -> str | None: return api_key @api_key.setter # type: ignore def api_key(self, value: str | None) -> None: # type: ignore global api_key api_key = value @property # type: ignore @override def organization(self) -> str | None: return organization @organization.setter # type: ignore def organization(self, value: str | None) -> None: # type: ignore global organization organization = value @property @override def base_url(self) -> _httpx.URL: if base_url is not None: return _httpx.URL(base_url) return super().base_url @base_url.setter def base_url(self, url: _httpx.URL | str) -> None: super().base_url = url # type: ignore[misc] @property # type: ignore @override def timeout(self) -> float | Timeout | None: return timeout @timeout.setter # type: ignore def timeout(self, value: float | Timeout | None) -> None: # type: ignore global timeout timeout = value @property # type: ignore @override def max_retries(self) -> int: return max_retries @max_retries.setter # type: ignore def max_retries(self, value: int) -> None: # type: ignore global max_retries max_retries = value @property # type: ignore @override def _custom_headers(self) -> _t.Mapping[str, str] | None: return default_headers @_custom_headers.setter # type: ignore def _custom_headers(self, value: _t.Mapping[str, str] | None) -> None: # type: ignore global default_headers default_headers = value @property # type: ignore @override def _custom_query(self) -> _t.Mapping[str, object] | None: return default_query @_custom_query.setter # type: ignore def _custom_query(self, value: _t.Mapping[str, object] | None) -> None: # type: ignore global default_query default_query = value @property # type: ignore @override def _client(self) -> _httpx.Client: return http_client or super()._client @_client.setter # type: ignore def _client(self, value: _httpx.Client) -> None: # type: ignore global http_client http_client = value class _AzureModuleClient(_ModuleClient, AzureOpenAI): # type: ignore ... 
class _AmbiguousModuleClientUsageError(OpenAIError): def __init__(self) -> None: super().__init__( "Ambiguous use of module client; please set `openai.api_type` or the `OPENAI_API_TYPE` environment variable to `openai` or `azure`" ) def _has_openai_credentials() -> bool: return _os.environ.get("OPENAI_API_KEY") is not None def _has_azure_credentials() -> bool: return azure_endpoint is not None or _os.environ.get("AZURE_OPENAI_API_KEY") is not None def _has_azure_ad_credentials() -> bool: return ( _os.environ.get("AZURE_OPENAI_AD_TOKEN") is not None or azure_ad_token is not None or azure_ad_token_provider is not None ) _client: OpenAI | None = None def _load_client() -> OpenAI: # type: ignore[reportUnusedFunction] global _client if _client is None: global api_type, azure_endpoint, azure_ad_token, api_version if azure_endpoint is None: azure_endpoint = _os.environ.get("AZURE_OPENAI_ENDPOINT") if azure_ad_token is None: azure_ad_token = _os.environ.get("AZURE_OPENAI_AD_TOKEN") if api_version is None: api_version = _os.environ.get("OPENAI_API_VERSION") if api_type is None: has_openai = _has_openai_credentials() has_azure = _has_azure_credentials() has_azure_ad = _has_azure_ad_credentials() if has_openai and (has_azure or has_azure_ad): raise _AmbiguousModuleClientUsageError() if (azure_ad_token is not None or azure_ad_token_provider is not None) and _os.environ.get( "AZURE_OPENAI_API_KEY" ) is not None: raise _AmbiguousModuleClientUsageError() if has_azure or has_azure_ad: api_type = "azure" else: api_type = "openai" if api_type == "azure": _client = _AzureModuleClient( # type: ignore api_version=api_version, azure_endpoint=azure_endpoint, api_key=api_key, azure_ad_token=azure_ad_token, azure_ad_token_provider=azure_ad_token_provider, organization=organization, base_url=base_url, timeout=timeout, max_retries=max_retries, default_headers=default_headers, default_query=default_query, http_client=http_client, ) return _client _client = _ModuleClient( api_key=api_key, organization=organization, base_url=base_url, timeout=timeout, max_retries=max_retries, default_headers=default_headers, default_query=default_query, http_client=http_client, ) return _client return _client def _reset_client() -> None: # type: ignore[reportUnusedFunction] global _client _client = None from ._module_client import ( beta as beta, chat as chat, audio as audio, files as files, images as images, models as models, embeddings as embeddings, completions as completions, fine_tuning as fine_tuning, moderations as moderations, ) openai-python-1.12.0/src/openai/__main__.py000066400000000000000000000000361456126711000205230ustar00rootroot00000000000000from .cli import main main() openai-python-1.12.0/src/openai/_base_client.py000066400000000000000000001722771456126711000214330ustar00rootroot00000000000000from __future__ import annotations import json import time import uuid import email import asyncio import inspect import logging import platform import warnings import email.utils from types import TracebackType from random import random from typing import ( TYPE_CHECKING, Any, Dict, Type, Union, Generic, Mapping, TypeVar, Iterable, Iterator, Optional, Generator, AsyncIterator, cast, overload, ) from functools import lru_cache from typing_extensions import Literal, override, get_origin import anyio import httpx import distro import pydantic from httpx import URL, Limits from pydantic import PrivateAttr from . 
import _exceptions from ._qs import Querystring from ._files import to_httpx_files, async_to_httpx_files from ._types import ( NOT_GIVEN, Body, Omit, Query, Headers, Timeout, NotGiven, ResponseT, Transport, AnyMapping, PostParser, ProxiesTypes, RequestFiles, HttpxSendArgs, AsyncTransport, RequestOptions, ModelBuilderProtocol, ) from ._utils import is_dict, is_list, is_given, is_mapping from ._compat import model_copy, model_dump from ._models import GenericModel, FinalRequestOptions, validate_type, construct_type from ._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, extract_response_type, ) from ._constants import ( DEFAULT_LIMITS, DEFAULT_TIMEOUT, MAX_RETRY_DELAY, DEFAULT_MAX_RETRIES, INITIAL_RETRY_DELAY, RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER, ) from ._streaming import Stream, AsyncStream from ._exceptions import ( APIStatusError, APITimeoutError, APIConnectionError, APIResponseValidationError, ) from ._legacy_response import LegacyAPIResponse log: logging.Logger = logging.getLogger(__name__) # TODO: make base page type vars covariant SyncPageT = TypeVar("SyncPageT", bound="BaseSyncPage[Any]") AsyncPageT = TypeVar("AsyncPageT", bound="BaseAsyncPage[Any]") _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _StreamT = TypeVar("_StreamT", bound=Stream[Any]) _AsyncStreamT = TypeVar("_AsyncStreamT", bound=AsyncStream[Any]) if TYPE_CHECKING: from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT else: try: from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT except ImportError: # taken from https://github.com/encode/httpx/blob/3ba5fe0d7ac70222590e759c31442b1cab263791/httpx/_config.py#L366 HTTPX_DEFAULT_TIMEOUT = Timeout(5.0) class PageInfo: """Stores the necessary information to build the request to retrieve the next page. Either `url` or `params` must be set. """ url: URL | NotGiven params: Query | NotGiven @overload def __init__( self, *, url: URL, ) -> None: ... @overload def __init__( self, *, params: Query, ) -> None: ... def __init__( self, *, url: URL | NotGiven = NOT_GIVEN, params: Query | NotGiven = NOT_GIVEN, ) -> None: self.url = url self.params = params class BasePage(GenericModel, Generic[_T]): """ Defines the core interface for pagination. Type Args: ModelT: The pydantic model that represents an item in the response. Methods: has_next_page(): Check if there is another page available next_page_info(): Get the necessary information to make a request for the next page """ _options: FinalRequestOptions = PrivateAttr() _model: Type[_T] = PrivateAttr() def has_next_page(self) -> bool: items = self._get_page_items() if not items: return False return self.next_page_info() is not None def next_page_info(self) -> Optional[PageInfo]: ... def _get_page_items(self) -> Iterable[_T]: # type: ignore[empty-body] ... def _params_from_url(self, url: URL) -> httpx.QueryParams: # TODO: do we have to preprocess params here? 
return httpx.QueryParams(cast(Any, self._options.params)).merge(url.params) def _info_to_options(self, info: PageInfo) -> FinalRequestOptions: options = model_copy(self._options) options._strip_raw_response_header() if not isinstance(info.params, NotGiven): options.params = {**options.params, **info.params} return options if not isinstance(info.url, NotGiven): params = self._params_from_url(info.url) url = info.url.copy_with(params=params) options.params = dict(url.params) options.url = str(url) return options raise ValueError("Unexpected PageInfo state") class BaseSyncPage(BasePage[_T], Generic[_T]): _client: SyncAPIClient = pydantic.PrivateAttr() def _set_private_attributes( self, client: SyncAPIClient, model: Type[_T], options: FinalRequestOptions, ) -> None: self._model = model self._client = client self._options = options # Pydantic uses a custom `__iter__` method to support casting BaseModels # to dictionaries. e.g. dict(model). # As we want to support `for item in page`, this is inherently incompatible # with the default pydantic behaviour. It is not possible to support both # use cases at once. Fortunately, this is not a big deal as all other pydantic # methods should continue to work as expected as there is an alternative method # to cast a model to a dictionary, model.dict(), which is used internally # by pydantic. def __iter__(self) -> Iterator[_T]: # type: ignore for page in self.iter_pages(): for item in page._get_page_items(): yield item def iter_pages(self: SyncPageT) -> Iterator[SyncPageT]: page = self while True: yield page if page.has_next_page(): page = page.get_next_page() else: return def get_next_page(self: SyncPageT) -> SyncPageT: info = self.next_page_info() if not info: raise RuntimeError( "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." 
) options = self._info_to_options(info) return self._client._request_api_list(self._model, page=self.__class__, options=options) class AsyncPaginator(Generic[_T, AsyncPageT]): def __init__( self, client: AsyncAPIClient, options: FinalRequestOptions, page_cls: Type[AsyncPageT], model: Type[_T], ) -> None: self._model = model self._client = client self._options = options self._page_cls = page_cls def __await__(self) -> Generator[Any, None, AsyncPageT]: return self._get_page().__await__() async def _get_page(self) -> AsyncPageT: def _parser(resp: AsyncPageT) -> AsyncPageT: resp._set_private_attributes( model=self._model, options=self._options, client=self._client, ) return resp self._options.post_parser = _parser return await self._client.request(self._page_cls, self._options) async def __aiter__(self) -> AsyncIterator[_T]: # https://github.com/microsoft/pyright/issues/3464 page = cast( AsyncPageT, await self, # type: ignore ) async for item in page: yield item class BaseAsyncPage(BasePage[_T], Generic[_T]): _client: AsyncAPIClient = pydantic.PrivateAttr() def _set_private_attributes( self, model: Type[_T], client: AsyncAPIClient, options: FinalRequestOptions, ) -> None: self._model = model self._client = client self._options = options async def __aiter__(self) -> AsyncIterator[_T]: async for page in self.iter_pages(): for item in page._get_page_items(): yield item async def iter_pages(self: AsyncPageT) -> AsyncIterator[AsyncPageT]: page = self while True: yield page if page.has_next_page(): page = await page.get_next_page() else: return async def get_next_page(self: AsyncPageT) -> AsyncPageT: info = self.next_page_info() if not info: raise RuntimeError( "No next page expected; please check `.has_next_page()` before calling `.get_next_page()`." ) options = self._info_to_options(info) return await self._client._request_api_list(self._model, page=self.__class__, options=options) _HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient]) _DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]]) class BaseClient(Generic[_HttpxClientT, _DefaultStreamT]): _client: _HttpxClientT _version: str _base_url: URL max_retries: int timeout: Union[float, Timeout, None] _limits: httpx.Limits _proxies: ProxiesTypes | None _transport: Transport | AsyncTransport | None _strict_response_validation: bool _idempotency_header: str | None _default_stream_cls: type[_DefaultStreamT] | None = None def __init__( self, *, version: str, base_url: str | URL, _strict_response_validation: bool, max_retries: int = DEFAULT_MAX_RETRIES, timeout: float | Timeout | None = DEFAULT_TIMEOUT, limits: httpx.Limits, transport: Transport | AsyncTransport | None, proxies: ProxiesTypes | None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, ) -> None: self._version = version self._base_url = self._enforce_trailing_slash(URL(base_url)) self.max_retries = max_retries self.timeout = timeout self._limits = limits self._proxies = proxies self._transport = transport self._custom_headers = custom_headers or {} self._custom_query = custom_query or {} self._strict_response_validation = _strict_response_validation self._idempotency_header = None def _enforce_trailing_slash(self, url: URL) -> URL: if url.raw_path.endswith(b"/"): return url return url.copy_with(raw_path=url.raw_path + b"/") def _make_status_error_from_response( self, response: httpx.Response, ) -> APIStatusError: if response.is_closed and not response.is_stream_consumed: # We 
can't read the response body as it has been closed # before it was read. This can happen if an event hook # raises a status error. body = None err_msg = f"Error code: {response.status_code}" else: err_text = response.text.strip() body = err_text try: body = json.loads(err_text) err_msg = f"Error code: {response.status_code} - {body}" except Exception: err_msg = err_text or f"Error code: {response.status_code}" return self._make_status_error(err_msg, body=body, response=response) def _make_status_error( self, err_msg: str, *, body: object, response: httpx.Response, ) -> _exceptions.APIStatusError: raise NotImplementedError() def _remaining_retries( self, remaining_retries: Optional[int], options: FinalRequestOptions, ) -> int: return remaining_retries if remaining_retries is not None else options.get_max_retries(self.max_retries) def _build_headers(self, options: FinalRequestOptions) -> httpx.Headers: custom_headers = options.headers or {} headers_dict = _merge_mappings(self.default_headers, custom_headers) self._validate_headers(headers_dict, custom_headers) # headers are case-insensitive while dictionaries are not. headers = httpx.Headers(headers_dict) idempotency_header = self._idempotency_header if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers: headers[idempotency_header] = options.idempotency_key or self._idempotency_key() return headers def _prepare_url(self, url: str) -> URL: """ Merge a URL argument together with any 'base_url' on the client, to create the URL used for the outgoing request. """ # Copied from httpx's `_merge_url` method. merge_url = URL(url) if merge_url.is_relative_url: merge_raw_path = self.base_url.raw_path + merge_url.raw_path.lstrip(b"/") return self.base_url.copy_with(raw_path=merge_raw_path) return merge_url def _build_request( self, options: FinalRequestOptions, ) -> httpx.Request: if log.isEnabledFor(logging.DEBUG): log.debug("Request options: %s", model_dump(options, exclude_unset=True)) kwargs: dict[str, Any] = {} json_data = options.json_data if options.extra_json is not None: if json_data is None: json_data = cast(Body, options.extra_json) elif is_mapping(json_data): json_data = _merge_mappings(json_data, options.extra_json) else: raise RuntimeError(f"Unexpected JSON data type, {type(json_data)}, cannot merge with `extra_body`") headers = self._build_headers(options) params = _merge_mappings(self._custom_query, options.params) content_type = headers.get("Content-Type") # If the given Content-Type header is multipart/form-data then it # has to be removed so that httpx can generate the header with # additional information for us as it has to be in this form # for the server to be able to correctly parse the request: # multipart/form-data; boundary=---abc-- if content_type is not None and content_type.startswith("multipart/form-data"): if "boundary" not in content_type: # only remove the header if the boundary hasn't been explicitly set # as the caller doesn't want httpx to come up with their own boundary headers.pop("Content-Type") # As we are now sending multipart/form-data instead of application/json # we need to tell httpx to use it, https://www.python-httpx.org/advanced/#multipart-file-encoding if json_data: if not is_dict(json_data): raise TypeError( f"Expected query input to be a dictionary for multipart requests but got {type(json_data)} instead." 
) kwargs["data"] = self._serialize_multipartform(json_data) # TODO: report this error to httpx return self._client.build_request( # pyright: ignore[reportUnknownMemberType] headers=headers, timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout, method=options.method, url=self._prepare_url(options.url), # the `Query` type that we use is incompatible with qs' # `Params` type as it needs to be typed as `Mapping[str, object]` # so that passing a `TypedDict` doesn't cause an error. # https://github.com/microsoft/pyright/issues/3526#event-6715453066 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None, json=json_data, files=options.files, **kwargs, ) def _serialize_multipartform(self, data: Mapping[object, object]) -> dict[str, object]: items = self.qs.stringify_items( # TODO: type ignore is required as stringify_items is well typed but we can't be # well typed without heavy validation. data, # type: ignore array_format="brackets", ) serialized: dict[str, object] = {} for key, value in items: existing = serialized.get(key) if not existing: serialized[key] = value continue # If a value has already been set for this key then that # means we're sending data like `array[]=[1, 2, 3]` and we # need to tell httpx that we want to send multiple values with # the same key which is done by using a list or a tuple. # # Note: 2d arrays should never result in the same key at both # levels so it's safe to assume that if the value is a list, # it was because we changed it to be a list. if is_list(existing): existing.append(value) else: serialized[key] = [existing, value] return serialized def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalRequestOptions) -> type[ResponseT]: if not is_given(options.headers): return cast_to # make a copy of the headers so we don't mutate user-input headers = dict(options.headers) # we internally support defining a temporary header to override the # default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response` # see _response.py for implementation details override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN) if is_given(override_cast_to): options.headers = headers return cast(Type[ResponseT], override_cast_to) return cast_to def _should_stream_response_body(self, request: httpx.Request) -> bool: return request.headers.get(RAW_RESPONSE_HEADER) == "stream" # type: ignore[no-any-return] def _process_response_data( self, *, data: object, cast_to: type[ResponseT], response: httpx.Response, ) -> ResponseT: if data is None: return cast(ResponseT, None) if cast_to is object: return cast(ResponseT, data) try: if inspect.isclass(cast_to) and issubclass(cast_to, ModelBuilderProtocol): return cast(ResponseT, cast_to.build(response=response, data=data)) if self._strict_response_validation: return cast(ResponseT, validate_type(type_=cast_to, value=data)) return cast(ResponseT, construct_type(type_=cast_to, value=data)) except pydantic.ValidationError as err: raise APIResponseValidationError(response=response, body=data) from err @property def qs(self) -> Querystring: return Querystring() @property def custom_auth(self) -> httpx.Auth | None: return None @property def auth_headers(self) -> dict[str, str]: return {} @property def default_headers(self) -> dict[str, str | Omit]: return { "Accept": "application/json", "Content-Type": "application/json", "User-Agent": self.user_agent, **self.platform_headers(), **self.auth_headers, **self._custom_headers, } def _validate_headers( self, 
headers: Headers, # noqa: ARG002 custom_headers: Headers, # noqa: ARG002 ) -> None: """Validate the given default headers and custom headers. Does nothing by default. """ return @property def user_agent(self) -> str: return f"{self.__class__.__name__}/Python {self._version}" @property def base_url(self) -> URL: return self._base_url @base_url.setter def base_url(self, url: URL | str) -> None: self._base_url = self._enforce_trailing_slash(url if isinstance(url, URL) else URL(url)) def platform_headers(self) -> Dict[str, str]: return platform_headers(self._version) def _parse_retry_after_header(self, response_headers: Optional[httpx.Headers] = None) -> float | None: """Returns a float of the number of seconds (not milliseconds) to wait after retrying, or None if unspecified. About the Retry-After header: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After See also https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After#syntax """ if response_headers is None: return None # First, try the non-standard `retry-after-ms` header for milliseconds, # which is more precise than integer-seconds `retry-after` try: retry_ms_header = response_headers.get("retry-after-ms", None) return float(retry_ms_header) / 1000 except (TypeError, ValueError): pass # Next, try parsing `retry-after` header as seconds (allowing nonstandard floats). retry_header = response_headers.get("retry-after") try: # note: the spec indicates that this should only ever be an integer # but if someone sends a float there's no reason for us to not respect it return float(retry_header) except (TypeError, ValueError): pass # Last, try parsing `retry-after` as a date. retry_date_tuple = email.utils.parsedate_tz(retry_header) if retry_date_tuple is None: return None retry_date = email.utils.mktime_tz(retry_date_tuple) return float(retry_date - time.time()) def _calculate_retry_timeout( self, remaining_retries: int, options: FinalRequestOptions, response_headers: Optional[httpx.Headers] = None, ) -> float: max_retries = options.get_max_retries(self.max_retries) # If the API asks us to wait a certain amount of time (and it's a reasonable amount), just do what it says. retry_after = self._parse_retry_after_header(response_headers) if retry_after is not None and 0 < retry_after <= 60: return retry_after nb_retries = max_retries - remaining_retries # Apply exponential backoff, but not more than the max. sleep_seconds = min(INITIAL_RETRY_DELAY * pow(2.0, nb_retries), MAX_RETRY_DELAY) # Apply some jitter, plus-or-minus half a second. jitter = 1 - 0.25 * random() timeout = sleep_seconds * jitter return timeout if timeout >= 0 else 0 def _should_retry(self, response: httpx.Response) -> bool: # Note: this is not a standard header should_retry_header = response.headers.get("x-should-retry") # If the server explicitly says whether or not to retry, obey. if should_retry_header == "true": log.debug("Retrying as header `x-should-retry` is set to `true`") return True if should_retry_header == "false": log.debug("Not retrying as header `x-should-retry` is set to `false`") return False # Retry on request timeouts. if response.status_code == 408: log.debug("Retrying due to status code %i", response.status_code) return True # Retry on lock timeouts. if response.status_code == 409: log.debug("Retrying due to status code %i", response.status_code) return True # Retry on rate limits. if response.status_code == 429: log.debug("Retrying due to status code %i", response.status_code) return True # Retry internal errors. 
if response.status_code >= 500: log.debug("Retrying due to status code %i", response.status_code) return True log.debug("Not retrying") return False def _idempotency_key(self) -> str: return f"stainless-python-retry-{uuid.uuid4()}" class SyncHttpxClientWrapper(httpx.Client): def __del__(self) -> None: try: self.close() except Exception: pass class SyncAPIClient(BaseClient[httpx.Client, Stream[Any]]): _client: httpx.Client _default_stream_cls: type[Stream[Any]] | None = None def __init__( self, *, version: str, base_url: str | URL, max_retries: int = DEFAULT_MAX_RETRIES, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, transport: Transport | None = None, proxies: ProxiesTypes | None = None, limits: Limits | None = None, http_client: httpx.Client | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, _strict_response_validation: bool, ) -> None: if limits is not None: warnings.warn( "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, stacklevel=3, ) if http_client is not None: raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`") else: limits = DEFAULT_LIMITS if transport is not None: warnings.warn( "The `transport` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, stacklevel=3, ) if http_client is not None: raise ValueError("The `http_client` argument is mutually exclusive with `transport`") if proxies is not None: warnings.warn( "The `proxies` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, stacklevel=3, ) if http_client is not None: raise ValueError("The `http_client` argument is mutually exclusive with `proxies`") if not is_given(timeout): # if the user passed in a custom http client with a non-default # timeout set then we use that timeout. # # note: there is an edge case here where the user passes in a client # where they've explicitly set the timeout to match the default timeout # as this check is structural, meaning that we'll think they didn't # pass in a timeout and will ignore it if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT: timeout = http_client.timeout else: timeout = DEFAULT_TIMEOUT super().__init__( version=version, limits=limits, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), proxies=proxies, base_url=base_url, transport=transport, max_retries=max_retries, custom_query=custom_query, custom_headers=custom_headers, _strict_response_validation=_strict_response_validation, ) self._client = http_client or SyncHttpxClientWrapper( base_url=base_url, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), proxies=proxies, transport=transport, limits=limits, follow_redirects=True, ) def is_closed(self) -> bool: return self._client.is_closed def close(self) -> None: """Close the underlying HTTPX client. The client will *not* be usable after this. 
""" # If an error is thrown while constructing a client, self._client # may not be present if hasattr(self, "_client"): self._client.close() def __enter__(self: _T) -> _T: return self def __exit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: self.close() def _prepare_options( self, options: FinalRequestOptions, # noqa: ARG002 ) -> None: """Hook for mutating the given options""" return None def _prepare_request( self, request: httpx.Request, # noqa: ARG002 ) -> None: """This method is used as a callback for mutating the `Request` object after it has been constructed. This is useful for cases where you want to add certain headers based off of the request properties, e.g. `url`, `method` etc. """ return None @overload def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, remaining_retries: Optional[int] = None, *, stream: Literal[True], stream_cls: Type[_StreamT], ) -> _StreamT: ... @overload def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, remaining_retries: Optional[int] = None, *, stream: Literal[False] = False, ) -> ResponseT: ... @overload def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, remaining_retries: Optional[int] = None, *, stream: bool = False, stream_cls: Type[_StreamT] | None = None, ) -> ResponseT | _StreamT: ... def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, remaining_retries: Optional[int] = None, *, stream: bool = False, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: return self._request( cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, remaining_retries=remaining_retries, ) def _request( self, *, cast_to: Type[ResponseT], options: FinalRequestOptions, remaining_retries: int | None, stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: cast_to = self._maybe_override_cast_to(cast_to, options) self._prepare_options(options) retries = self._remaining_retries(remaining_retries, options) request = self._build_request(options) self._prepare_request(request) kwargs: HttpxSendArgs = {} if self.custom_auth is not None: kwargs["auth"] = self.custom_auth try: response = self._client.send( request, stream=stream or self._should_stream_response_body(request=request), **kwargs, ) except httpx.TimeoutException as err: log.debug("Encountered httpx.TimeoutException", exc_info=True) if retries > 0: return self._retry_request( options, cast_to, retries, stream=stream, stream_cls=stream_cls, response_headers=None, ) log.debug("Raising timeout error") raise APITimeoutError(request=request) from err except Exception as err: log.debug("Encountered Exception", exc_info=True) if retries > 0: return self._retry_request( options, cast_to, retries, stream=stream, stream_cls=stream_cls, response_headers=None, ) log.debug("Raising connection error") raise APIConnectionError(request=request) from err log.debug( 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase ) try: response.raise_for_status() except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code log.debug("Encountered httpx.HTTPStatusError", exc_info=True) if retries > 0 and self._should_retry(err.response): err.response.close() return self._retry_request( options, cast_to, retries, err.response.headers, stream=stream, stream_cls=stream_cls, ) # If the response is streamed then we need to explicitly read the response # to completion before 
attempting to access the response text. if not err.response.is_closed: err.response.read() log.debug("Re-raising status error") raise self._make_status_error_from_response(err.response) from None return self._process_response( cast_to=cast_to, options=options, response=response, stream=stream, stream_cls=stream_cls, ) def _retry_request( self, options: FinalRequestOptions, cast_to: Type[ResponseT], remaining_retries: int, response_headers: httpx.Headers | None, *, stream: bool, stream_cls: type[_StreamT] | None, ) -> ResponseT | _StreamT: remaining = remaining_retries - 1 if remaining == 1: log.debug("1 retry left") else: log.debug("%i retries left", remaining) timeout = self._calculate_retry_timeout(remaining, options, response_headers) log.info("Retrying request to %s in %f seconds", options.url, timeout) # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a # different thread if necessary. time.sleep(timeout) return self._request( options=options, cast_to=cast_to, remaining_retries=remaining, stream=stream, stream_cls=stream_cls, ) def _process_response( self, *, cast_to: Type[ResponseT], options: FinalRequestOptions, response: httpx.Response, stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, ) -> ResponseT: if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": return cast( ResponseT, LegacyAPIResponse( raw=response, client=self, cast_to=cast_to, stream=stream, stream_cls=stream_cls, options=options, ), ) origin = get_origin(cast_to) or cast_to if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): if not issubclass(origin, APIResponse): raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}") response_cls = cast("type[BaseAPIResponse[Any]]", cast_to) return cast( ResponseT, response_cls( raw=response, client=self, cast_to=extract_response_type(response_cls), stream=stream, stream_cls=stream_cls, options=options, ), ) if cast_to == httpx.Response: return cast(ResponseT, response) api_response = APIResponse( raw=response, client=self, cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast] stream=stream, stream_cls=stream_cls, options=options, ) if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): return cast(ResponseT, api_response) return api_response.parse() def _request_api_list( self, model: Type[object], page: Type[SyncPageT], options: FinalRequestOptions, ) -> SyncPageT: def _parser(resp: SyncPageT) -> SyncPageT: resp._set_private_attributes( client=self, model=model, options=options, ) return resp options.post_parser = _parser return self.request(page, options, stream=False) @overload def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: Literal[False] = False, ) -> ResponseT: ... @overload def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: Literal[True], stream_cls: type[_StreamT], ) -> _StreamT: ... @overload def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: bool, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: ... 
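# --- illustrative sketch (not part of the library) ---------------------------
# A standalone approximation of the sleep computed in `_retry_request` via
# `_calculate_retry_timeout` above: exponential growth from an initial delay,
# capped at a maximum, with multiplicative jitter so concurrent clients do not
# retry in lockstep. The `initial` and `maximum` defaults below are assumptions
# for illustration only; the real values live in `_constants.py`.
def _example_backoff(nb_retries_taken: int, *, initial: float = 0.5, maximum: float = 8.0) -> float:
    from random import random

    sleep_seconds = min(initial * pow(2.0, nb_retries_taken), maximum)
    jitter = 1 - 0.25 * random()  # shave off up to 25% of the delay
    return max(sleep_seconds * jitter, 0.0)

# e.g. _example_backoff(0) is roughly 0.38-0.5s, _example_backoff(3) roughly 3-4s,
# and _example_backoff(10) is capped at 8s before jitter is applied.
# ------------------------------------------------------------------------------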
def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: bool = False, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: opts = FinalRequestOptions.construct(method="get", url=path, **options) # cast is required because mypy complains about returning Any even though # it understands the type variables return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) @overload def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: Literal[False] = False, ) -> ResponseT: ... @overload def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: Literal[True], stream_cls: type[_StreamT], ) -> _StreamT: ... @overload def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: bool, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: ... def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, files: RequestFiles | None = None, stream: bool = False, stream_cls: type[_StreamT] | None = None, ) -> ResponseT | _StreamT: opts = FinalRequestOptions.construct( method="post", url=path, json_data=body, files=to_httpx_files(files), **options ) return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls)) def patch( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, ) -> ResponseT: opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) return self.request(cast_to, opts) def put( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: opts = FinalRequestOptions.construct( method="put", url=path, json_data=body, files=to_httpx_files(files), **options ) return self.request(cast_to, opts) def delete( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, ) -> ResponseT: opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) return self.request(cast_to, opts) def get_api_list( self, path: str, *, model: Type[object], page: Type[SyncPageT], body: Body | None = None, options: RequestOptions = {}, method: str = "get", ) -> SyncPageT: opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) return self._request_api_list(model, page, opts) class AsyncHttpxClientWrapper(httpx.AsyncClient): def __del__(self) -> None: try: # TODO(someday): support non asyncio runtimes here asyncio.get_running_loop().create_task(self.aclose()) except Exception: pass class AsyncAPIClient(BaseClient[httpx.AsyncClient, AsyncStream[Any]]): _client: httpx.AsyncClient _default_stream_cls: type[AsyncStream[Any]] | None = None def __init__( self, *, version: str, base_url: str | URL, _strict_response_validation: bool, max_retries: int = DEFAULT_MAX_RETRIES, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, transport: AsyncTransport | None = None, proxies: ProxiesTypes | None = None, limits: Limits | None = None, http_client: httpx.AsyncClient | None = None, custom_headers: Mapping[str, str] | None = None, custom_query: Mapping[str, object] | None = None, ) -> None: if limits is not 
None: warnings.warn( "The `connection_pool_limits` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, stacklevel=3, ) if http_client is not None: raise ValueError("The `http_client` argument is mutually exclusive with `connection_pool_limits`") else: limits = DEFAULT_LIMITS if transport is not None: warnings.warn( "The `transport` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, stacklevel=3, ) if http_client is not None: raise ValueError("The `http_client` argument is mutually exclusive with `transport`") if proxies is not None: warnings.warn( "The `proxies` argument is deprecated. The `http_client` argument should be passed instead", category=DeprecationWarning, stacklevel=3, ) if http_client is not None: raise ValueError("The `http_client` argument is mutually exclusive with `proxies`") if not is_given(timeout): # if the user passed in a custom http client with a non-default # timeout set then we use that timeout. # # note: there is an edge case here where the user passes in a client # where they've explicitly set the timeout to match the default timeout # as this check is structural, meaning that we'll think they didn't # pass in a timeout and will ignore it if http_client and http_client.timeout != HTTPX_DEFAULT_TIMEOUT: timeout = http_client.timeout else: timeout = DEFAULT_TIMEOUT super().__init__( version=version, base_url=base_url, limits=limits, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), proxies=proxies, transport=transport, max_retries=max_retries, custom_query=custom_query, custom_headers=custom_headers, _strict_response_validation=_strict_response_validation, ) self._client = http_client or AsyncHttpxClientWrapper( base_url=base_url, # cast to a valid type because mypy doesn't understand our type narrowing timeout=cast(Timeout, timeout), proxies=proxies, transport=transport, limits=limits, follow_redirects=True, ) def is_closed(self) -> bool: return self._client.is_closed async def close(self) -> None: """Close the underlying HTTPX client. The client will *not* be usable after this. """ await self._client.aclose() async def __aenter__(self: _T) -> _T: return self async def __aexit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: await self.close() async def _prepare_options( self, options: FinalRequestOptions, # noqa: ARG002 ) -> None: """Hook for mutating the given options""" return None async def _prepare_request( self, request: httpx.Request, # noqa: ARG002 ) -> None: """This method is used as a callback for mutating the `Request` object after it has been constructed. This is useful for cases where you want to add certain headers based off of the request properties, e.g. `url`, `method` etc. """ return None @overload async def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, *, stream: Literal[False] = False, remaining_retries: Optional[int] = None, ) -> ResponseT: ... @overload async def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, *, stream: Literal[True], stream_cls: type[_AsyncStreamT], remaining_retries: Optional[int] = None, ) -> _AsyncStreamT: ... @overload async def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, *, stream: bool, stream_cls: type[_AsyncStreamT] | None = None, remaining_retries: Optional[int] = None, ) -> ResponseT | _AsyncStreamT: ... 
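# --- illustrative sketch (not part of the library) ---------------------------
# How the timeout inference in `__init__` above plays out in practice: when a
# caller supplies their own `httpx.AsyncClient` with a non-default timeout and
# does not pass `timeout=`, that client's timeout is respected. `AsyncOpenAI`
# (the concrete subclass defined in `_client.py`) is used here; the API key is a
# placeholder and the endpoint call is only an example.
async def _example_custom_http_client() -> None:
    import httpx

    from openai import AsyncOpenAI

    custom = httpx.AsyncClient(timeout=httpx.Timeout(30.0, connect=5.0))
    # no explicit `timeout=` is given, so the 30s timeout configured on `custom` is used
    async with AsyncOpenAI(api_key="sk-example", http_client=custom) as client:
        page = await client.models.list()
        print([model.id for model in page.data])

# run with: asyncio.run(_example_custom_http_client())
# ------------------------------------------------------------------------------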
async def request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, *, stream: bool = False, stream_cls: type[_AsyncStreamT] | None = None, remaining_retries: Optional[int] = None, ) -> ResponseT | _AsyncStreamT: return await self._request( cast_to=cast_to, options=options, stream=stream, stream_cls=stream_cls, remaining_retries=remaining_retries, ) async def _request( self, cast_to: Type[ResponseT], options: FinalRequestOptions, *, stream: bool, stream_cls: type[_AsyncStreamT] | None, remaining_retries: int | None, ) -> ResponseT | _AsyncStreamT: cast_to = self._maybe_override_cast_to(cast_to, options) await self._prepare_options(options) retries = self._remaining_retries(remaining_retries, options) request = self._build_request(options) await self._prepare_request(request) kwargs: HttpxSendArgs = {} if self.custom_auth is not None: kwargs["auth"] = self.custom_auth try: response = await self._client.send( request, stream=stream or self._should_stream_response_body(request=request), **kwargs, ) except httpx.TimeoutException as err: log.debug("Encountered httpx.TimeoutException", exc_info=True) if retries > 0: return await self._retry_request( options, cast_to, retries, stream=stream, stream_cls=stream_cls, response_headers=None, ) log.debug("Raising timeout error") raise APITimeoutError(request=request) from err except Exception as err: log.debug("Encountered Exception", exc_info=True) if retries > 0: return await self._retry_request( options, cast_to, retries, stream=stream, stream_cls=stream_cls, response_headers=None, ) log.debug("Raising connection error") raise APIConnectionError(request=request) from err log.debug( 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase ) try: response.raise_for_status() except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code log.debug("Encountered httpx.HTTPStatusError", exc_info=True) if retries > 0 and self._should_retry(err.response): await err.response.aclose() return await self._retry_request( options, cast_to, retries, err.response.headers, stream=stream, stream_cls=stream_cls, ) # If the response is streamed then we need to explicitly read the response # to completion before attempting to access the response text. 
if not err.response.is_closed: await err.response.aread() log.debug("Re-raising status error") raise self._make_status_error_from_response(err.response) from None return await self._process_response( cast_to=cast_to, options=options, response=response, stream=stream, stream_cls=stream_cls, ) async def _retry_request( self, options: FinalRequestOptions, cast_to: Type[ResponseT], remaining_retries: int, response_headers: httpx.Headers | None, *, stream: bool, stream_cls: type[_AsyncStreamT] | None, ) -> ResponseT | _AsyncStreamT: remaining = remaining_retries - 1 if remaining == 1: log.debug("1 retry left") else: log.debug("%i retries left", remaining) timeout = self._calculate_retry_timeout(remaining, options, response_headers) log.info("Retrying request to %s in %f seconds", options.url, timeout) await anyio.sleep(timeout) return await self._request( options=options, cast_to=cast_to, remaining_retries=remaining, stream=stream, stream_cls=stream_cls, ) async def _process_response( self, *, cast_to: Type[ResponseT], options: FinalRequestOptions, response: httpx.Response, stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, ) -> ResponseT: if response.request.headers.get(RAW_RESPONSE_HEADER) == "true": return cast( ResponseT, LegacyAPIResponse( raw=response, client=self, cast_to=cast_to, stream=stream, stream_cls=stream_cls, options=options, ), ) origin = get_origin(cast_to) or cast_to if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse): if not issubclass(origin, AsyncAPIResponse): raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}") response_cls = cast("type[BaseAPIResponse[Any]]", cast_to) return cast( "ResponseT", response_cls( raw=response, client=self, cast_to=extract_response_type(response_cls), stream=stream, stream_cls=stream_cls, options=options, ), ) if cast_to == httpx.Response: return cast(ResponseT, response) api_response = AsyncAPIResponse( raw=response, client=self, cast_to=cast("type[ResponseT]", cast_to), # pyright: ignore[reportUnnecessaryCast] stream=stream, stream_cls=stream_cls, options=options, ) if bool(response.request.headers.get(RAW_RESPONSE_HEADER)): return cast(ResponseT, api_response) return await api_response.parse() def _request_api_list( self, model: Type[_T], page: Type[AsyncPageT], options: FinalRequestOptions, ) -> AsyncPaginator[_T, AsyncPageT]: return AsyncPaginator(client=self, options=options, page_cls=page, model=model) @overload async def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: Literal[False] = False, ) -> ResponseT: ... @overload async def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: Literal[True], stream_cls: type[_AsyncStreamT], ) -> _AsyncStreamT: ... @overload async def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: bool, stream_cls: type[_AsyncStreamT] | None = None, ) -> ResponseT | _AsyncStreamT: ... 
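# --- illustrative sketch (not part of the library) ---------------------------
# Two equivalent ways to consume the `AsyncPaginator` returned by
# `_request_api_list` above (and therefore by `get_api_list`-based endpoints).
# `client` is assumed to be an `AsyncOpenAI` instance and
# `client.fine_tuning.jobs.list` is used as an example paginated endpoint.
async def _example_pagination(client) -> None:
    # 1) flat iteration: pages are fetched lazily as items are consumed
    async for job in client.fine_tuning.jobs.list(limit=10):
        print(job.id)

    # 2) page by page: awaiting the paginator yields the first page object
    page = await client.fine_tuning.jobs.list(limit=10)
    while True:
        for job in page.data:
            print(job.id)
        if not page.has_next_page():
            break
        page = await page.get_next_page()
# ------------------------------------------------------------------------------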
async def get( self, path: str, *, cast_to: Type[ResponseT], options: RequestOptions = {}, stream: bool = False, stream_cls: type[_AsyncStreamT] | None = None, ) -> ResponseT | _AsyncStreamT: opts = FinalRequestOptions.construct(method="get", url=path, **options) return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) @overload async def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: Literal[False] = False, ) -> ResponseT: ... @overload async def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: Literal[True], stream_cls: type[_AsyncStreamT], ) -> _AsyncStreamT: ... @overload async def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: bool, stream_cls: type[_AsyncStreamT] | None = None, ) -> ResponseT | _AsyncStreamT: ... async def post( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, stream: bool = False, stream_cls: type[_AsyncStreamT] | None = None, ) -> ResponseT | _AsyncStreamT: opts = FinalRequestOptions.construct( method="post", url=path, json_data=body, files=await async_to_httpx_files(files), **options ) return await self.request(cast_to, opts, stream=stream, stream_cls=stream_cls) async def patch( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, ) -> ResponseT: opts = FinalRequestOptions.construct(method="patch", url=path, json_data=body, **options) return await self.request(cast_to, opts) async def put( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, files: RequestFiles | None = None, options: RequestOptions = {}, ) -> ResponseT: opts = FinalRequestOptions.construct( method="put", url=path, json_data=body, files=await async_to_httpx_files(files), **options ) return await self.request(cast_to, opts) async def delete( self, path: str, *, cast_to: Type[ResponseT], body: Body | None = None, options: RequestOptions = {}, ) -> ResponseT: opts = FinalRequestOptions.construct(method="delete", url=path, json_data=body, **options) return await self.request(cast_to, opts) def get_api_list( self, path: str, *, model: Type[_T], page: Type[AsyncPageT], body: Body | None = None, options: RequestOptions = {}, method: str = "get", ) -> AsyncPaginator[_T, AsyncPageT]: opts = FinalRequestOptions.construct(method=method, url=path, json_data=body, **options) return self._request_api_list(model, page, opts) def make_request_options( *, query: Query | None = None, extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, idempotency_key: str | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, post_parser: PostParser | NotGiven = NOT_GIVEN, ) -> RequestOptions: """Create a dict of type RequestOptions without keys of NotGiven values.""" options: RequestOptions = {} if extra_headers is not None: options["headers"] = extra_headers if extra_body is not None: options["extra_json"] = cast(AnyMapping, extra_body) if query is not None: options["params"] = query if extra_query is not None: options["params"] = {**options.get("params", {}), **extra_query} if not isinstance(timeout, NotGiven): options["timeout"] = timeout if idempotency_key is not None: 
options["idempotency_key"] = idempotency_key if is_given(post_parser): # internal options["post_parser"] = post_parser # type: ignore return options class OtherPlatform: def __init__(self, name: str) -> None: self.name = name @override def __str__(self) -> str: return f"Other:{self.name}" Platform = Union[ OtherPlatform, Literal[ "MacOS", "Linux", "Windows", "FreeBSD", "OpenBSD", "iOS", "Android", "Unknown", ], ] def get_platform() -> Platform: try: system = platform.system().lower() platform_name = platform.platform().lower() except Exception: return "Unknown" if "iphone" in platform_name or "ipad" in platform_name: # Tested using Python3IDE on an iPhone 11 and Pythonista on an iPad 7 # system is Darwin and platform_name is a string like: # - Darwin-21.6.0-iPhone12,1-64bit # - Darwin-21.6.0-iPad7,11-64bit return "iOS" if system == "darwin": return "MacOS" if system == "windows": return "Windows" if "android" in platform_name: # Tested using Pydroid 3 # system is Linux and platform_name is a string like 'Linux-5.10.81-android12-9-00001-geba40aecb3b7-ab8534902-aarch64-with-libc' return "Android" if system == "linux": # https://distro.readthedocs.io/en/latest/#distro.id distro_id = distro.id() if distro_id == "freebsd": return "FreeBSD" if distro_id == "openbsd": return "OpenBSD" return "Linux" if platform_name: return OtherPlatform(platform_name) return "Unknown" @lru_cache(maxsize=None) def platform_headers(version: str) -> Dict[str, str]: return { "X-Stainless-Lang": "python", "X-Stainless-Package-Version": version, "X-Stainless-OS": str(get_platform()), "X-Stainless-Arch": str(get_architecture()), "X-Stainless-Runtime": get_python_runtime(), "X-Stainless-Runtime-Version": get_python_version(), } class OtherArch: def __init__(self, name: str) -> None: self.name = name @override def __str__(self) -> str: return f"other:{self.name}" Arch = Union[OtherArch, Literal["x32", "x64", "arm", "arm64", "unknown"]] def get_python_runtime() -> str: try: return platform.python_implementation() except Exception: return "unknown" def get_python_version() -> str: try: return platform.python_version() except Exception: return "unknown" def get_architecture() -> Arch: try: python_bitness, _ = platform.architecture() machine = platform.machine().lower() except Exception: return "unknown" if machine in ("arm64", "aarch64"): return "arm64" # TODO: untested if machine == "arm": return "arm" if machine == "x86_64": return "x64" # TODO: untested if python_bitness == "32bit": return "x32" if machine: return OtherArch(machine) return "unknown" def _merge_mappings( obj1: Mapping[_T_co, Union[_T, Omit]], obj2: Mapping[_T_co, Union[_T, Omit]], ) -> Dict[_T_co, _T]: """Merge two mappings of the same type, removing any values that are instances of `Omit`. In cases with duplicate keys the second mapping takes precedence. """ merged = {**obj1, **obj2} return {key: value for key, value in merged.items() if not isinstance(value, Omit)} openai-python-1.12.0/src/openai/_client.py000066400000000000000000000472551456126711000204360ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, Union, Mapping from typing_extensions import Self, override import httpx from . 
import resources, _exceptions from ._qs import Querystring from ._types import ( NOT_GIVEN, Omit, Timeout, NotGiven, Transport, ProxiesTypes, RequestOptions, ) from ._utils import ( is_given, is_mapping, get_async_library, ) from ._version import __version__ from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import OpenAIError, APIStatusError from ._base_client import ( DEFAULT_MAX_RETRIES, SyncAPIClient, AsyncAPIClient, ) __all__ = [ "Timeout", "Transport", "ProxiesTypes", "RequestOptions", "resources", "OpenAI", "AsyncOpenAI", "Client", "AsyncClient", ] class OpenAI(SyncAPIClient): completions: resources.Completions chat: resources.Chat embeddings: resources.Embeddings files: resources.Files images: resources.Images audio: resources.Audio moderations: resources.Moderations models: resources.Models fine_tuning: resources.FineTuning beta: resources.Beta with_raw_response: OpenAIWithRawResponse with_streaming_response: OpenAIWithStreamedResponse # client options api_key: str organization: str | None def __init__( self, *, api_key: str | None = None, organization: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#client) for more details. http_client: httpx.Client | None = None, # Enable or disable schema validation for data returned by the API. # When enabled an error APIResponseValidationError is raised # if the API responds with invalid data for the expected schema. # # This parameter may be removed or changed in the future. # If you rely on this feature, please open a GitHub issue # outlining your use-case to help us decide if it should be # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: """Construct a new synchronous openai client instance. 
This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `OPENAI_API_KEY` - `organization` from `OPENAI_ORG_ID` """ if api_key is None: api_key = os.environ.get("OPENAI_API_KEY") if api_key is None: raise OpenAIError( "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" ) self.api_key = api_key if organization is None: organization = os.environ.get("OPENAI_ORG_ID") self.organization = organization if base_url is None: base_url = os.environ.get("OPENAI_BASE_URL") if base_url is None: base_url = f"https://api.openai.com/v1" super().__init__( version=__version__, base_url=base_url, max_retries=max_retries, timeout=timeout, http_client=http_client, custom_headers=default_headers, custom_query=default_query, _strict_response_validation=_strict_response_validation, ) self._default_stream_cls = Stream self.completions = resources.Completions(self) self.chat = resources.Chat(self) self.embeddings = resources.Embeddings(self) self.files = resources.Files(self) self.images = resources.Images(self) self.audio = resources.Audio(self) self.moderations = resources.Moderations(self) self.models = resources.Models(self) self.fine_tuning = resources.FineTuning(self) self.beta = resources.Beta(self) self.with_raw_response = OpenAIWithRawResponse(self) self.with_streaming_response = OpenAIWithStreamedResponse(self) @property @override def qs(self) -> Querystring: return Querystring(array_format="comma") @property @override def auth_headers(self) -> dict[str, str]: api_key = self.api_key return {"Authorization": f"Bearer {api_key}"} @property @override def default_headers(self) -> dict[str, str | Omit]: return { **super().default_headers, "X-Stainless-Async": "false", "OpenAI-Organization": self.organization if self.organization is not None else Omit(), **self._custom_headers, } def copy( self, *, api_key: str | None = None, organization: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, max_retries: int | NotGiven = NOT_GIVEN, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, set_default_query: Mapping[str, object] | None = None, _extra_kwargs: Mapping[str, Any] = {}, ) -> Self: """ Create a new client instance re-using the same options given to the current client with optional overriding. 
""" if default_headers is not None and set_default_headers is not None: raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") if default_query is not None and set_default_query is not None: raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") headers = self._custom_headers if default_headers is not None: headers = {**headers, **default_headers} elif set_default_headers is not None: headers = set_default_headers params = self._custom_query if default_query is not None: params = {**params, **default_query} elif set_default_query is not None: params = set_default_query http_client = http_client or self._client return self.__class__( api_key=api_key or self.api_key, organization=organization or self.organization, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, max_retries=max_retries if is_given(max_retries) else self.max_retries, default_headers=headers, default_query=params, **_extra_kwargs, ) # Alias for `copy` for nicer inline usage, e.g. # client.with_options(timeout=10).foo.create(...) with_options = copy @override def _make_status_error( self, err_msg: str, *, body: object, response: httpx.Response, ) -> APIStatusError: data = body.get("error", body) if is_mapping(body) else body if response.status_code == 400: return _exceptions.BadRequestError(err_msg, response=response, body=data) if response.status_code == 401: return _exceptions.AuthenticationError(err_msg, response=response, body=data) if response.status_code == 403: return _exceptions.PermissionDeniedError(err_msg, response=response, body=data) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=data) if response.status_code == 409: return _exceptions.ConflictError(err_msg, response=response, body=data) if response.status_code == 422: return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=data) if response.status_code >= 500: return _exceptions.InternalServerError(err_msg, response=response, body=data) return APIStatusError(err_msg, response=response, body=data) class AsyncOpenAI(AsyncAPIClient): completions: resources.AsyncCompletions chat: resources.AsyncChat embeddings: resources.AsyncEmbeddings files: resources.AsyncFiles images: resources.AsyncImages audio: resources.AsyncAudio moderations: resources.AsyncModerations models: resources.AsyncModels fine_tuning: resources.AsyncFineTuning beta: resources.AsyncBeta with_raw_response: AsyncOpenAIWithRawResponse with_streaming_response: AsyncOpenAIWithStreamedResponse # client options api_key: str organization: str | None def __init__( self, *, api_key: str | None = None, organization: str | None = None, base_url: str | httpx.URL | None = None, timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, # Configure a custom httpx client. See the [httpx documentation](https://www.python-httpx.org/api/#asyncclient) for more details. http_client: httpx.AsyncClient | None = None, # Enable or disable schema validation for data returned by the API. # When enabled an error APIResponseValidationError is raised # if the API responds with invalid data for the expected schema. 
# # This parameter may be removed or changed in the future. # If you rely on this feature, please open a GitHub issue # outlining your use-case to help us decide if it should be # part of our public interface in the future. _strict_response_validation: bool = False, ) -> None: """Construct a new async openai client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `OPENAI_API_KEY` - `organization` from `OPENAI_ORG_ID` """ if api_key is None: api_key = os.environ.get("OPENAI_API_KEY") if api_key is None: raise OpenAIError( "The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable" ) self.api_key = api_key if organization is None: organization = os.environ.get("OPENAI_ORG_ID") self.organization = organization if base_url is None: base_url = os.environ.get("OPENAI_BASE_URL") if base_url is None: base_url = f"https://api.openai.com/v1" super().__init__( version=__version__, base_url=base_url, max_retries=max_retries, timeout=timeout, http_client=http_client, custom_headers=default_headers, custom_query=default_query, _strict_response_validation=_strict_response_validation, ) self._default_stream_cls = AsyncStream self.completions = resources.AsyncCompletions(self) self.chat = resources.AsyncChat(self) self.embeddings = resources.AsyncEmbeddings(self) self.files = resources.AsyncFiles(self) self.images = resources.AsyncImages(self) self.audio = resources.AsyncAudio(self) self.moderations = resources.AsyncModerations(self) self.models = resources.AsyncModels(self) self.fine_tuning = resources.AsyncFineTuning(self) self.beta = resources.AsyncBeta(self) self.with_raw_response = AsyncOpenAIWithRawResponse(self) self.with_streaming_response = AsyncOpenAIWithStreamedResponse(self) @property @override def qs(self) -> Querystring: return Querystring(array_format="comma") @property @override def auth_headers(self) -> dict[str, str]: api_key = self.api_key return {"Authorization": f"Bearer {api_key}"} @property @override def default_headers(self) -> dict[str, str | Omit]: return { **super().default_headers, "X-Stainless-Async": f"async:{get_async_library()}", "OpenAI-Organization": self.organization if self.organization is not None else Omit(), **self._custom_headers, } def copy( self, *, api_key: str | None = None, organization: str | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, max_retries: int | NotGiven = NOT_GIVEN, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, set_default_query: Mapping[str, object] | None = None, _extra_kwargs: Mapping[str, Any] = {}, ) -> Self: """ Create a new client instance re-using the same options given to the current client with optional overriding. 
""" if default_headers is not None and set_default_headers is not None: raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive") if default_query is not None and set_default_query is not None: raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive") headers = self._custom_headers if default_headers is not None: headers = {**headers, **default_headers} elif set_default_headers is not None: headers = set_default_headers params = self._custom_query if default_query is not None: params = {**params, **default_query} elif set_default_query is not None: params = set_default_query http_client = http_client or self._client return self.__class__( api_key=api_key or self.api_key, organization=organization or self.organization, base_url=base_url or self.base_url, timeout=self.timeout if isinstance(timeout, NotGiven) else timeout, http_client=http_client, max_retries=max_retries if is_given(max_retries) else self.max_retries, default_headers=headers, default_query=params, **_extra_kwargs, ) # Alias for `copy` for nicer inline usage, e.g. # client.with_options(timeout=10).foo.create(...) with_options = copy @override def _make_status_error( self, err_msg: str, *, body: object, response: httpx.Response, ) -> APIStatusError: data = body.get("error", body) if is_mapping(body) else body if response.status_code == 400: return _exceptions.BadRequestError(err_msg, response=response, body=data) if response.status_code == 401: return _exceptions.AuthenticationError(err_msg, response=response, body=data) if response.status_code == 403: return _exceptions.PermissionDeniedError(err_msg, response=response, body=data) if response.status_code == 404: return _exceptions.NotFoundError(err_msg, response=response, body=data) if response.status_code == 409: return _exceptions.ConflictError(err_msg, response=response, body=data) if response.status_code == 422: return _exceptions.UnprocessableEntityError(err_msg, response=response, body=data) if response.status_code == 429: return _exceptions.RateLimitError(err_msg, response=response, body=data) if response.status_code >= 500: return _exceptions.InternalServerError(err_msg, response=response, body=data) return APIStatusError(err_msg, response=response, body=data) class OpenAIWithRawResponse: def __init__(self, client: OpenAI) -> None: self.completions = resources.CompletionsWithRawResponse(client.completions) self.chat = resources.ChatWithRawResponse(client.chat) self.embeddings = resources.EmbeddingsWithRawResponse(client.embeddings) self.files = resources.FilesWithRawResponse(client.files) self.images = resources.ImagesWithRawResponse(client.images) self.audio = resources.AudioWithRawResponse(client.audio) self.moderations = resources.ModerationsWithRawResponse(client.moderations) self.models = resources.ModelsWithRawResponse(client.models) self.fine_tuning = resources.FineTuningWithRawResponse(client.fine_tuning) self.beta = resources.BetaWithRawResponse(client.beta) class AsyncOpenAIWithRawResponse: def __init__(self, client: AsyncOpenAI) -> None: self.completions = resources.AsyncCompletionsWithRawResponse(client.completions) self.chat = resources.AsyncChatWithRawResponse(client.chat) self.embeddings = resources.AsyncEmbeddingsWithRawResponse(client.embeddings) self.files = resources.AsyncFilesWithRawResponse(client.files) self.images = resources.AsyncImagesWithRawResponse(client.images) self.audio = resources.AsyncAudioWithRawResponse(client.audio) self.moderations = 
resources.AsyncModerationsWithRawResponse(client.moderations) self.models = resources.AsyncModelsWithRawResponse(client.models) self.fine_tuning = resources.AsyncFineTuningWithRawResponse(client.fine_tuning) self.beta = resources.AsyncBetaWithRawResponse(client.beta) class OpenAIWithStreamedResponse: def __init__(self, client: OpenAI) -> None: self.completions = resources.CompletionsWithStreamingResponse(client.completions) self.chat = resources.ChatWithStreamingResponse(client.chat) self.embeddings = resources.EmbeddingsWithStreamingResponse(client.embeddings) self.files = resources.FilesWithStreamingResponse(client.files) self.images = resources.ImagesWithStreamingResponse(client.images) self.audio = resources.AudioWithStreamingResponse(client.audio) self.moderations = resources.ModerationsWithStreamingResponse(client.moderations) self.models = resources.ModelsWithStreamingResponse(client.models) self.fine_tuning = resources.FineTuningWithStreamingResponse(client.fine_tuning) self.beta = resources.BetaWithStreamingResponse(client.beta) class AsyncOpenAIWithStreamedResponse: def __init__(self, client: AsyncOpenAI) -> None: self.completions = resources.AsyncCompletionsWithStreamingResponse(client.completions) self.chat = resources.AsyncChatWithStreamingResponse(client.chat) self.embeddings = resources.AsyncEmbeddingsWithStreamingResponse(client.embeddings) self.files = resources.AsyncFilesWithStreamingResponse(client.files) self.images = resources.AsyncImagesWithStreamingResponse(client.images) self.audio = resources.AsyncAudioWithStreamingResponse(client.audio) self.moderations = resources.AsyncModerationsWithStreamingResponse(client.moderations) self.models = resources.AsyncModelsWithStreamingResponse(client.models) self.fine_tuning = resources.AsyncFineTuningWithStreamingResponse(client.fine_tuning) self.beta = resources.AsyncBetaWithStreamingResponse(client.beta) Client = OpenAI AsyncClient = AsyncOpenAI openai-python-1.12.0/src/openai/_compat.py000066400000000000000000000143651456126711000204370ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload from datetime import date, datetime from typing_extensions import Self import pydantic from pydantic.fields import FieldInfo from ._types import StrBytesIntFloat _T = TypeVar("_T") _ModelT = TypeVar("_ModelT", bound=pydantic.BaseModel) # --------------- Pydantic v2 compatibility --------------- # Pyright incorrectly reports some of our functions as overriding a method when they don't # pyright: reportIncompatibleMethodOverride=false PYDANTIC_V2 = pydantic.VERSION.startswith("2.") # v1 re-exports if TYPE_CHECKING: def parse_date(value: date | StrBytesIntFloat) -> date: # noqa: ARG001 ... def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime: # noqa: ARG001 ... def get_args(t: type[Any]) -> tuple[Any, ...]: # noqa: ARG001 ... def is_union(tp: type[Any] | None) -> bool: # noqa: ARG001 ... def get_origin(t: type[Any]) -> type[Any] | None: # noqa: ARG001 ... def is_literal_type(type_: type[Any]) -> bool: # noqa: ARG001 ... def is_typeddict(type_: type[Any]) -> bool: # noqa: ARG001 ... 
else: if PYDANTIC_V2: from pydantic.v1.typing import ( get_args as get_args, is_union as is_union, get_origin as get_origin, is_typeddict as is_typeddict, is_literal_type as is_literal_type, ) from pydantic.v1.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime else: from pydantic.typing import ( get_args as get_args, is_union as is_union, get_origin as get_origin, is_typeddict as is_typeddict, is_literal_type as is_literal_type, ) from pydantic.datetime_parse import parse_date as parse_date, parse_datetime as parse_datetime # refactored config if TYPE_CHECKING: from pydantic import ConfigDict as ConfigDict else: if PYDANTIC_V2: from pydantic import ConfigDict else: # TODO: provide an error message here? ConfigDict = None # renamed methods / properties def parse_obj(model: type[_ModelT], value: object) -> _ModelT: if PYDANTIC_V2: return model.model_validate(value) else: return cast(_ModelT, model.parse_obj(value)) # pyright: ignore[reportDeprecated, reportUnnecessaryCast] def field_is_required(field: FieldInfo) -> bool: if PYDANTIC_V2: return field.is_required() return field.required # type: ignore def field_get_default(field: FieldInfo) -> Any: value = field.get_default() if PYDANTIC_V2: from pydantic_core import PydanticUndefined if value == PydanticUndefined: return None return value return value def field_outer_type(field: FieldInfo) -> Any: if PYDANTIC_V2: return field.annotation return field.outer_type_ # type: ignore def get_model_config(model: type[pydantic.BaseModel]) -> Any: if PYDANTIC_V2: return model.model_config return model.__config__ # type: ignore def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]: if PYDANTIC_V2: return model.model_fields return model.__fields__ # type: ignore def model_copy(model: _ModelT) -> _ModelT: if PYDANTIC_V2: return model.model_copy() return model.copy() # type: ignore def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str: if PYDANTIC_V2: return model.model_dump_json(indent=indent) return model.json(indent=indent) # type: ignore def model_dump( model: pydantic.BaseModel, *, exclude_unset: bool = False, exclude_defaults: bool = False, ) -> dict[str, Any]: if PYDANTIC_V2: return model.model_dump( exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, ) return cast( "dict[str, Any]", model.dict( # pyright: ignore[reportDeprecated, reportUnnecessaryCast] exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, ), ) def model_parse(model: type[_ModelT], data: Any) -> _ModelT: if PYDANTIC_V2: return model.model_validate(data) return model.parse_obj(data) # pyright: ignore[reportDeprecated] # generic models if TYPE_CHECKING: class GenericModel(pydantic.BaseModel): ... else: if PYDANTIC_V2: # there no longer needs to be a distinction in v2 but # we still have to create our own subclass to avoid # inconsistent MRO ordering errors class GenericModel(pydantic.BaseModel): ... else: import pydantic.generics class GenericModel(pydantic.generics.GenericModel, pydantic.BaseModel): ... # cached properties if TYPE_CHECKING: cached_property = property # we define a separate type (copied from typeshed) # that represents that `cached_property` is `set`able # at runtime, which differs from `@property`. # # this is a separate type as editors likely special case # `@property` and we don't want to cause issues just to have # more helpful internal types. 
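# As a rough illustration (not part of this module's API), a cached property
# computes its value once per instance and then reuses it on later access:
#
#     class Example:
#         @cached_property
#         def expensive(self) -> int:
#             return sum(range(1_000_000))
#
# the first attribute access runs the function body; subsequent accesses
# return the stored result without recomputing it.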
class typed_cached_property(Generic[_T]): func: Callable[[Any], _T] attrname: str | None def __init__(self, func: Callable[[Any], _T]) -> None: ... @overload def __get__(self, instance: None, owner: type[Any] | None = None) -> Self: ... @overload def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... def __get__(self, instance: object, owner: type[Any] | None = None) -> _T | Self: raise NotImplementedError() def __set_name__(self, owner: type[Any], name: str) -> None: ... # __set__ is not defined at runtime, but @cached_property is designed to be settable def __set__(self, instance: object, value: _T) -> None: ... else: try: from functools import cached_property as cached_property except ImportError: from cached_property import cached_property as cached_property typed_cached_property = cached_property openai-python-1.12.0/src/openai/_constants.py000066400000000000000000000006471456126711000211660ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. import httpx RAW_RESPONSE_HEADER = "X-Stainless-Raw-Response" OVERRIDE_CAST_TO_HEADER = "____stainless_override_cast_to" # default timeout is 10 minutes DEFAULT_TIMEOUT = httpx.Timeout(timeout=600.0, connect=5.0) DEFAULT_MAX_RETRIES = 2 DEFAULT_LIMITS = httpx.Limits(max_connections=100, max_keepalive_connections=20) INITIAL_RETRY_DELAY = 0.5 MAX_RETRY_DELAY = 8.0 openai-python-1.12.0/src/openai/_exceptions.py000066400000000000000000000070361456126711000213320ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Any, Optional, cast from typing_extensions import Literal import httpx from ._utils import is_dict __all__ = [ "BadRequestError", "AuthenticationError", "PermissionDeniedError", "NotFoundError", "ConflictError", "UnprocessableEntityError", "RateLimitError", "InternalServerError", ] class OpenAIError(Exception): pass class APIError(OpenAIError): message: str request: httpx.Request body: object | None """The API response body. If the API responded with a valid JSON structure then this property will be the decoded result. If it isn't a valid JSON structure then this will be the raw response. If there was no response associated with this error then it will be `None`. 
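For example (an illustrative sketch; it assumes `OPENAI_API_KEY` is set in the
environment and uses a deliberately invalid model ID as a placeholder):

```py
import openai

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment

try:
    client.models.retrieve("no-such-model")
except openai.APIStatusError as err:
    print(err.status_code)
    print(err.body)  # decoded JSON error payload when the API returned one
```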
""" code: Optional[str] = None param: Optional[str] = None type: Optional[str] def __init__(self, message: str, request: httpx.Request, *, body: object | None) -> None: super().__init__(message) self.request = request self.message = message self.body = body if is_dict(body): self.code = cast(Any, body.get("code")) self.param = cast(Any, body.get("param")) self.type = cast(Any, body.get("type")) else: self.code = None self.param = None self.type = None class APIResponseValidationError(APIError): response: httpx.Response status_code: int def __init__(self, response: httpx.Response, body: object | None, *, message: str | None = None) -> None: super().__init__(message or "Data returned by API invalid for expected schema.", response.request, body=body) self.response = response self.status_code = response.status_code class APIStatusError(APIError): """Raised when an API response has a status code of 4xx or 5xx.""" response: httpx.Response status_code: int def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None: super().__init__(message, response.request, body=body) self.response = response self.status_code = response.status_code class APIConnectionError(APIError): def __init__(self, *, message: str = "Connection error.", request: httpx.Request) -> None: super().__init__(message, request, body=None) class APITimeoutError(APIConnectionError): def __init__(self, request: httpx.Request) -> None: super().__init__(message="Request timed out.", request=request) class BadRequestError(APIStatusError): status_code: Literal[400] = 400 # pyright: ignore[reportIncompatibleVariableOverride] class AuthenticationError(APIStatusError): status_code: Literal[401] = 401 # pyright: ignore[reportIncompatibleVariableOverride] class PermissionDeniedError(APIStatusError): status_code: Literal[403] = 403 # pyright: ignore[reportIncompatibleVariableOverride] class NotFoundError(APIStatusError): status_code: Literal[404] = 404 # pyright: ignore[reportIncompatibleVariableOverride] class ConflictError(APIStatusError): status_code: Literal[409] = 409 # pyright: ignore[reportIncompatibleVariableOverride] class UnprocessableEntityError(APIStatusError): status_code: Literal[422] = 422 # pyright: ignore[reportIncompatibleVariableOverride] class RateLimitError(APIStatusError): status_code: Literal[429] = 429 # pyright: ignore[reportIncompatibleVariableOverride] class InternalServerError(APIStatusError): pass openai-python-1.12.0/src/openai/_extras/000077500000000000000000000000001456126711000200775ustar00rootroot00000000000000openai-python-1.12.0/src/openai/_extras/__init__.py000066400000000000000000000001531456126711000222070ustar00rootroot00000000000000from .numpy_proxy import numpy as numpy, has_numpy as has_numpy from .pandas_proxy import pandas as pandas openai-python-1.12.0/src/openai/_extras/_common.py000066400000000000000000000005541456126711000221040ustar00rootroot00000000000000from .._exceptions import OpenAIError INSTRUCTIONS = """ OpenAI error: missing `{library}` This feature requires additional dependencies: $ pip install openai[{extra}] """ def format_instructions(*, library: str, extra: str) -> str: return INSTRUCTIONS.format(library=library, extra=extra) class MissingDependencyError(OpenAIError): pass openai-python-1.12.0/src/openai/_extras/numpy_proxy.py000066400000000000000000000014371456126711000230670ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any from typing_extensions import override from .._utils import LazyProxy from 
._common import MissingDependencyError, format_instructions if TYPE_CHECKING: import numpy as numpy NUMPY_INSTRUCTIONS = format_instructions(library="numpy", extra="datalib") class NumpyProxy(LazyProxy[Any]): @override def __load__(self) -> Any: try: import numpy except ImportError as err: raise MissingDependencyError(NUMPY_INSTRUCTIONS) from err return numpy if not TYPE_CHECKING: numpy = NumpyProxy() def has_numpy() -> bool: try: import numpy # noqa: F401 # pyright: ignore[reportUnusedImport] except ImportError: return False return True openai-python-1.12.0/src/openai/_extras/pandas_proxy.py000066400000000000000000000011751456126711000231640ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any from typing_extensions import override from .._utils import LazyProxy from ._common import MissingDependencyError, format_instructions if TYPE_CHECKING: import pandas as pandas PANDAS_INSTRUCTIONS = format_instructions(library="pandas", extra="datalib") class PandasProxy(LazyProxy[Any]): @override def __load__(self) -> Any: try: import pandas except ImportError as err: raise MissingDependencyError(PANDAS_INSTRUCTIONS) from err return pandas if not TYPE_CHECKING: pandas = PandasProxy() openai-python-1.12.0/src/openai/_files.py000066400000000000000000000066161456126711000202560ustar00rootroot00000000000000from __future__ import annotations import io import os import pathlib from typing import overload from typing_extensions import TypeGuard import anyio from ._types import ( FileTypes, FileContent, RequestFiles, HttpxFileTypes, HttpxFileContent, HttpxRequestFiles, ) from ._utils import is_tuple_t, is_mapping_t, is_sequence_t def is_file_content(obj: object) -> TypeGuard[FileContent]: return ( isinstance(obj, bytes) or isinstance(obj, tuple) or isinstance(obj, io.IOBase) or isinstance(obj, os.PathLike) ) def assert_is_file_content(obj: object, *, key: str | None = None) -> None: if not is_file_content(obj): prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`" raise RuntimeError( f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/openai/openai-python/tree/main#file-uploads" ) from None @overload def to_httpx_files(files: None) -> None: ... @overload def to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... def to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: if files is None: return None if is_mapping_t(files): files = {key: _transform_file(file) for key, file in files.items()} elif is_sequence_t(files): files = [(key, _transform_file(file)) for key, file in files] else: raise TypeError(f"Unexpected file type input {type(files)}, expected mapping or sequence") return files def _transform_file(file: FileTypes) -> HttpxFileTypes: if is_file_content(file): if isinstance(file, os.PathLike): path = pathlib.Path(file) return (path.name, path.read_bytes()) return file if is_tuple_t(file): return (file[0], _read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") def _read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return pathlib.Path(file).read_bytes() return file @overload async def async_to_httpx_files(files: None) -> None: ... @overload async def async_to_httpx_files(files: RequestFiles) -> HttpxRequestFiles: ... 
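# As a rough illustration (not part of this module), the `FileTypes` values
# these helpers accept can be raw bytes, a path-like object, or a
# (filename, contents) tuple, optionally with a content type, e.g.
#
#     from pathlib import Path
#
#     as_path = Path("input.jsonl")                        # hypothetical path
#     as_bytes = b'{"prompt": "hi"}'
#     as_tuple = ("input.jsonl", b'{"prompt": "hi"}')
#
# `to_httpx_files()` / `async_to_httpx_files()` normalise all of these into
# the shapes httpx expects, reading path-like inputs from disk along the way.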
async def async_to_httpx_files(files: RequestFiles | None) -> HttpxRequestFiles | None: if files is None: return None if is_mapping_t(files): files = {key: await _async_transform_file(file) for key, file in files.items()} elif is_sequence_t(files): files = [(key, await _async_transform_file(file)) for key, file in files] else: raise TypeError("Unexpected file type input {type(files)}, expected mapping or sequence") return files async def _async_transform_file(file: FileTypes) -> HttpxFileTypes: if is_file_content(file): if isinstance(file, os.PathLike): path = anyio.Path(file) return (path.name, await path.read_bytes()) return file if is_tuple_t(file): return (file[0], await _async_read_file_content(file[1]), *file[2:]) raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple") async def _async_read_file_content(file: FileContent) -> HttpxFileContent: if isinstance(file, os.PathLike): return await anyio.Path(file).read_bytes() return file openai-python-1.12.0/src/openai/_legacy_response.py000066400000000000000000000350671456126711000223400ustar00rootroot00000000000000from __future__ import annotations import os import inspect import logging import datetime import functools from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, Iterator, AsyncIterator, cast, overload from typing_extensions import Awaitable, ParamSpec, override, deprecated, get_origin import anyio import httpx import pydantic from ._types import NoneType from ._utils import is_given from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type from ._exceptions import APIResponseValidationError if TYPE_CHECKING: from ._models import FinalRequestOptions from ._base_client import BaseClient P = ParamSpec("P") R = TypeVar("R") _T = TypeVar("_T") log: logging.Logger = logging.getLogger(__name__) class LegacyAPIResponse(Generic[R]): """This is a legacy class as it will be replaced by `APIResponse` and `AsyncAPIResponse` in the `_response.py` file in the next major release. For the sync client this will mostly be the same with the exception of `content` & `text` will be methods instead of properties. In the async client, all methods will be async. A migration script will be provided & the migration in general should be smooth. """ _cast_to: type[R] _client: BaseClient[Any, Any] _parsed_by_type: dict[type[Any], Any] _stream: bool _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None _options: FinalRequestOptions http_response: httpx.Response def __init__( self, *, raw: httpx.Response, cast_to: type[R], client: BaseClient[Any, Any], stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, options: FinalRequestOptions, ) -> None: self._cast_to = cast_to self._client = client self._parsed_by_type = {} self._stream = stream self._stream_cls = stream_cls self._options = options self.http_response = raw @overload def parse(self, *, to: type[_T]) -> _T: ... @overload def parse(self) -> R: ... def parse(self, *, to: type[_T] | None = None) -> R | _T: """Returns the rich python representation of this response's data. NOTE: For the async client: this will become a coroutine in the next major version. For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. You can customise the type that the response is parsed into through the `to` argument, e.g. 
```py from openai import BaseModel class MyModel(BaseModel): foo: str obj = response.parse(to=MyModel) print(obj.foo) ``` We support parsing: - `BaseModel` - `dict` - `list` - `Union` - `str` - `httpx.Response` """ cache_key = to if to is not None else self._cast_to cached = self._parsed_by_type.get(cache_key) if cached is not None: return cached # type: ignore[no-any-return] parsed = self._parse(to=to) if is_given(self._options.post_parser): parsed = self._options.post_parser(parsed) self._parsed_by_type[cache_key] = parsed return parsed @property def headers(self) -> httpx.Headers: return self.http_response.headers @property def http_request(self) -> httpx.Request: return self.http_response.request @property def status_code(self) -> int: return self.http_response.status_code @property def url(self) -> httpx.URL: return self.http_response.url @property def method(self) -> str: return self.http_request.method @property def content(self) -> bytes: """Return the binary response content. NOTE: this will be removed in favour of `.read()` in the next major version. """ return self.http_response.content @property def text(self) -> str: """Return the decoded response content. NOTE: this will be turned into a method in the next major version. """ return self.http_response.text @property def http_version(self) -> str: return self.http_response.http_version @property def is_closed(self) -> bool: return self.http_response.is_closed @property def elapsed(self) -> datetime.timedelta: """The time taken for the complete request/response cycle to complete.""" return self.http_response.elapsed def _parse(self, *, to: type[_T] | None = None) -> R | _T: if self._stream: if to: if not is_stream_class_type(to): raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}") return cast( _T, to( cast_to=extract_stream_chunk_type( to, failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]", ), response=self.http_response, client=cast(Any, self._client), ), ) if self._stream_cls: return cast( R, self._stream_cls( cast_to=extract_stream_chunk_type(self._stream_cls), response=self.http_response, client=cast(Any, self._client), ), ) stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls) if stream_cls is None: raise MissingStreamClassError() return cast( R, stream_cls( cast_to=self._cast_to, response=self.http_response, client=cast(Any, self._client), ), ) cast_to = to if to is not None else self._cast_to if cast_to is NoneType: return cast(R, None) response = self.http_response if cast_to == str: return cast(R, response.text) origin = get_origin(cast_to) or cast_to if inspect.isclass(origin) and issubclass(origin, HttpxBinaryResponseContent): return cast(R, cast_to(response)) # type: ignore if origin == LegacyAPIResponse: raise RuntimeError("Unexpected state - cast_to is `APIResponse`") if inspect.isclass(origin) and issubclass(origin, httpx.Response): # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response # and pass that class to our request functions. We cannot change the variance to be either # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct # the response class ourselves but that is something that should be supported directly in httpx # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. 
if cast_to != httpx.Response: raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") return cast(R, response) if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") if ( cast_to is not object and not origin is list and not origin is dict and not origin is Union and not issubclass(origin, BaseModel) ): raise RuntimeError( f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." ) # split is required to handle cases where additional information is included # in the response, e.g. application/json; charset=utf-8 content_type, *_ = response.headers.get("content-type", "*").split(";") if content_type != "application/json": if is_basemodel(cast_to): try: data = response.json() except Exception as exc: log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) else: return self._client._process_response_data( data=data, cast_to=cast_to, # type: ignore response=response, ) if self._client._strict_response_validation: raise APIResponseValidationError( response=response, message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", body=response.text, ) # If the API responds with content that isn't JSON then we just return # the (decoded) text without performing any parsing so that you can still # handle the response however you need to. return response.text # type: ignore data = response.json() return self._client._process_response_data( data=data, cast_to=cast_to, # type: ignore response=response, ) @override def __repr__(self) -> str: return f"" class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", ) def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, LegacyAPIResponse[R]]: """Higher order function that takes one of our bound API methods and wraps it to support returning the raw `APIResponse` object directly. """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "true" kwargs["extra_headers"] = extra_headers return cast(LegacyAPIResponse[R], func(*args, **kwargs)) return wrapped def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[LegacyAPIResponse[R]]]: """Higher order function that takes one of our bound API methods and wraps it to support returning the raw `APIResponse` object directly. 
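For example (an illustrative sketch; it assumes `OPENAI_API_KEY` is set in the
environment and the model and message are placeholders):

```py
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment


async def main() -> None:
    response = await client.chat.completions.with_raw_response.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(response.status_code)
    completion = response.parse()  # the regular `ChatCompletion` object
    print(completion.choices[0].message.content)


asyncio.run(main())
```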
""" @functools.wraps(func) async def wrapped(*args: P.args, **kwargs: P.kwargs) -> LegacyAPIResponse[R]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "true" kwargs["extra_headers"] = extra_headers return cast(LegacyAPIResponse[R], await func(*args, **kwargs)) return wrapped class HttpxBinaryResponseContent: response: httpx.Response def __init__(self, response: httpx.Response) -> None: self.response = response @property def content(self) -> bytes: return self.response.content @property def text(self) -> str: return self.response.text @property def encoding(self) -> str | None: return self.response.encoding @property def charset_encoding(self) -> str | None: return self.response.charset_encoding def json(self, **kwargs: Any) -> Any: return self.response.json(**kwargs) def read(self) -> bytes: return self.response.read() def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: return self.response.iter_bytes(chunk_size) def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: return self.response.iter_text(chunk_size) def iter_lines(self) -> Iterator[str]: return self.response.iter_lines() def iter_raw(self, chunk_size: int | None = None) -> Iterator[bytes]: return self.response.iter_raw(chunk_size) def write_to_file( self, file: str | os.PathLike[str], ) -> None: """Write the output to the given file. Accepts a filename or any path-like object, e.g. pathlib.Path Note: if you want to stream the data to the file instead of writing all at once then you should use `.with_streaming_response` when making the API request, e.g. `client.with_streaming_response.foo().stream_to_file('my_filename.txt')` """ with open(file, mode="wb") as f: for data in self.response.iter_bytes(): f.write(data) @deprecated( "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead" ) def stream_to_file( self, file: str | os.PathLike[str], *, chunk_size: int | None = None, ) -> None: with open(file, mode="wb") as f: for data in self.response.iter_bytes(chunk_size): f.write(data) def close(self) -> None: return self.response.close() async def aread(self) -> bytes: return await self.response.aread() async def aiter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: return self.response.aiter_bytes(chunk_size) async def aiter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: return self.response.aiter_text(chunk_size) async def aiter_lines(self) -> AsyncIterator[str]: return self.response.aiter_lines() async def aiter_raw(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: return self.response.aiter_raw(chunk_size) @deprecated( "Due to a bug, this method doesn't actually stream the response content, `.with_streaming_response.method()` should be used instead" ) async def astream_to_file( self, file: str | os.PathLike[str], *, chunk_size: int | None = None, ) -> None: path = anyio.Path(file) async with await path.open(mode="wb") as f: async for data in self.response.aiter_bytes(chunk_size): await f.write(data) async def aclose(self) -> None: return await self.response.aclose() openai-python-1.12.0/src/openai/_models.py000066400000000000000000000400321456126711000204250ustar00rootroot00000000000000from __future__ import annotations import inspect from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast from datetime import date, datetime from typing_extensions import ( Unpack, Literal, ClassVar, 
Protocol, Required, TypedDict, final, override, runtime_checkable, ) import pydantic import pydantic.generics from pydantic.fields import FieldInfo from ._types import ( Body, IncEx, Query, ModelT, Headers, Timeout, NotGiven, AnyMapping, HttpxRequestFiles, ) from ._utils import is_list, is_given, is_mapping, parse_date, parse_datetime, strip_not_given from ._compat import ( PYDANTIC_V2, ConfigDict, GenericModel as BaseGenericModel, get_args, is_union, parse_obj, get_origin, is_literal_type, get_model_config, get_model_fields, field_get_default, ) from ._constants import RAW_RESPONSE_HEADER __all__ = ["BaseModel", "GenericModel"] _T = TypeVar("_T") @runtime_checkable class _ConfigProtocol(Protocol): allow_population_by_field_name: bool class BaseModel(pydantic.BaseModel): if PYDANTIC_V2: model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow") else: @property @override def model_fields_set(self) -> set[str]: # a forwards-compat shim for pydantic v2 return self.__fields_set__ # type: ignore class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] extra: Any = pydantic.Extra.allow # type: ignore @override def __str__(self) -> str: # mypy complains about an invalid self arg return f'{self.__repr_name__()}({self.__repr_str__(", ")})' # type: ignore[misc] # Override the 'construct' method in a way that supports recursive parsing without validation. # Based on https://github.com/samuelcolvin/pydantic/issues/1168#issuecomment-817742836. @classmethod @override def construct( cls: Type[ModelT], _fields_set: set[str] | None = None, **values: object, ) -> ModelT: m = cls.__new__(cls) fields_values: dict[str, object] = {} config = get_model_config(cls) populate_by_name = ( config.allow_population_by_field_name if isinstance(config, _ConfigProtocol) else config.get("populate_by_name") ) if _fields_set is None: _fields_set = set() model_fields = get_model_fields(cls) for name, field in model_fields.items(): key = field.alias if key is None or (key not in values and populate_by_name): key = name if key in values: fields_values[name] = _construct_field(value=values[key], field=field, key=key) _fields_set.add(name) else: fields_values[name] = field_get_default(field) _extra = {} for key, value in values.items(): if key not in model_fields: if PYDANTIC_V2: _extra[key] = value else: _fields_set.add(key) fields_values[key] = value object.__setattr__(m, "__dict__", fields_values) if PYDANTIC_V2: # these properties are copied from Pydantic's `model_construct()` method object.__setattr__(m, "__pydantic_private__", None) object.__setattr__(m, "__pydantic_extra__", _extra) object.__setattr__(m, "__pydantic_fields_set__", _fields_set) else: # init_private_attributes() does not exist in v2 m._init_private_attributes() # type: ignore # copied from Pydantic v1's `construct()` method object.__setattr__(m, "__fields_set__", _fields_set) return m if not TYPE_CHECKING: # type checkers incorrectly complain about this assignment # because the type signatures are technically different # although not in practice model_construct = construct if not PYDANTIC_V2: # we define aliases for some of the new pydantic v2 methods so # that we can just document these methods without having to specify # a specific pydantic version as some users may not know which # pydantic version they are currently using @override def model_dump( self, *, mode: Literal["json", "python"] | str = "python", include: IncEx = None, exclude: IncEx = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, 
exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> dict[str, Any]: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. Args: mode: The mode in which `to_python` should run. If mode is 'json', the dictionary will only contain JSON serializable types. If mode is 'python', the dictionary may contain any Python objects. include: A list of fields to include in the output. exclude: A list of fields to exclude from the output. by_alias: Whether to use the field's alias in the dictionary key if defined. exclude_unset: Whether to exclude fields that are unset or None from the output. exclude_defaults: Whether to exclude fields that are set to their default value from the output. exclude_none: Whether to exclude fields that have a value of `None` from the output. round_trip: Whether to enable serialization and deserialization round-trip support. warnings: Whether to log warnings when invalid fields are encountered. Returns: A dictionary representation of the model. """ if mode != "python": raise ValueError("mode is only supported in Pydantic v2") if round_trip != False: raise ValueError("round_trip is only supported in Pydantic v2") if warnings != True: raise ValueError("warnings is only supported in Pydantic v2") return super().dict( # pyright: ignore[reportDeprecated] include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, ) @override def model_dump_json( self, *, indent: int | None = None, include: IncEx = None, exclude: IncEx = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> str: """Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json Generates a JSON representation of the model using Pydantic's `to_json` method. Args: indent: Indentation to use in the JSON output. If None is passed, the output will be compact. include: Field(s) to include in the JSON output. Can take either a string or set of strings. exclude: Field(s) to exclude from the JSON output. Can take either a string or set of strings. by_alias: Whether to serialize using field aliases. exclude_unset: Whether to exclude fields that have not been explicitly set. exclude_defaults: Whether to exclude fields that have the default value. exclude_none: Whether to exclude fields that have a value of `None`. round_trip: Whether to use serialization/deserialization between JSON and class instance. warnings: Whether to show any warnings that occurred during serialization. Returns: A JSON string representation of the model. 
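For example (an illustrative sketch only):

```py
from openai import BaseModel


class Point(BaseModel):
    x: int
    y: int


print(Point(x=1, y=2).model_dump_json(indent=2))
```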
""" if round_trip != False: raise ValueError("round_trip is only supported in Pydantic v2") if warnings != True: raise ValueError("warnings is only supported in Pydantic v2") return super().json( # type: ignore[reportDeprecated] indent=indent, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, ) def _construct_field(value: object, field: FieldInfo, key: str) -> object: if value is None: return field_get_default(field) if PYDANTIC_V2: type_ = field.annotation else: type_ = cast(type, field.outer_type_) # type: ignore if type_ is None: raise RuntimeError(f"Unexpected field type is None for {key}") return construct_type(value=value, type_=type_) def is_basemodel(type_: type) -> bool: """Returns whether or not the given type is either a `BaseModel` or a union of `BaseModel`""" origin = get_origin(type_) or type_ if is_union(type_): for variant in get_args(type_): if is_basemodel(variant): return True return False return issubclass(origin, BaseModel) or issubclass(origin, GenericModel) def construct_type(*, value: object, type_: type) -> object: """Loose coercion to the expected type with construction of nested values. If the given value does not match the expected type then it is returned as-is. """ # we need to use the origin class for any types that are subscripted generics # e.g. Dict[str, object] origin = get_origin(type_) or type_ args = get_args(type_) if is_union(origin): try: return validate_type(type_=type_, value=value) except Exception: pass # if the data is not valid, use the first variant that doesn't fail while deserializing for variant in args: try: return construct_type(value=value, type_=variant) except Exception: continue raise RuntimeError(f"Could not convert data into a valid instance of {type_}") if origin == dict: if not is_mapping(value): return value _, items_type = get_args(type_) # Dict[_, items_type] return {key: construct_type(value=item, type_=items_type) for key, item in value.items()} if not is_literal_type(type_) and (issubclass(origin, BaseModel) or issubclass(origin, GenericModel)): if is_list(value): return [cast(Any, type_).construct(**entry) if is_mapping(entry) else entry for entry in value] if is_mapping(value): if issubclass(type_, BaseModel): return type_.construct(**value) # type: ignore[arg-type] return cast(Any, type_).construct(**value) if origin == list: if not is_list(value): return value inner_type = args[0] # List[inner_type] return [construct_type(value=entry, type_=inner_type) for entry in value] if origin == float: if isinstance(value, int): coerced = float(value) if coerced != value: return value return coerced return value if type_ == datetime: try: return parse_datetime(value) # type: ignore except Exception: return value if type_ == date: try: return parse_date(value) # type: ignore except Exception: return value return value def validate_type(*, type_: type[_T], value: object) -> _T: """Strict validation that the given value matches the expected type""" if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): return cast(_T, parse_obj(type_, value)) return cast(_T, _validate_non_model_type(type_=type_, value=value)) # our use of subclasssing here causes weirdness for type checkers, # so we just pretend that we don't subclass if TYPE_CHECKING: GenericModel = BaseModel else: class GenericModel(BaseGenericModel, BaseModel): pass if PYDANTIC_V2: from pydantic import TypeAdapter def _validate_non_model_type(*, type_: type[_T], value: 
object) -> _T: return TypeAdapter(type_).validate_python(value) elif not TYPE_CHECKING: # TODO: condition is weird class RootModel(GenericModel, Generic[_T]): """Used as a placeholder to easily convert runtime types to a Pydantic format to provide validation. For example: ```py validated = RootModel[int](__root__="5").__root__ # validated: 5 ``` """ __root__: _T def _validate_non_model_type(*, type_: type[_T], value: object) -> _T: model = _create_pydantic_model(type_).validate(value) return cast(_T, model.__root__) def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]: return RootModel[type_] # type: ignore class FinalRequestOptionsInput(TypedDict, total=False): method: Required[str] url: Required[str] params: Query headers: Headers max_retries: int timeout: float | Timeout | None files: HttpxRequestFiles | None idempotency_key: str json_data: Body extra_json: AnyMapping @final class FinalRequestOptions(pydantic.BaseModel): method: str url: str params: Query = {} headers: Union[Headers, NotGiven] = NotGiven() max_retries: Union[int, NotGiven] = NotGiven() timeout: Union[float, Timeout, None, NotGiven] = NotGiven() files: Union[HttpxRequestFiles, None] = None idempotency_key: Union[str, None] = None post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven() # It should be noted that we cannot use `json` here as that would override # a BaseModel method in an incompatible fashion. json_data: Union[Body, None] = None extra_json: Union[AnyMapping, None] = None if PYDANTIC_V2: model_config: ClassVar[ConfigDict] = ConfigDict(arbitrary_types_allowed=True) else: class Config(pydantic.BaseConfig): # pyright: ignore[reportDeprecated] arbitrary_types_allowed: bool = True def get_max_retries(self, max_retries: int) -> int: if isinstance(self.max_retries, NotGiven): return max_retries return self.max_retries def _strip_raw_response_header(self) -> None: if not is_given(self.headers): return if self.headers.get(RAW_RESPONSE_HEADER): self.headers = {**self.headers} self.headers.pop(RAW_RESPONSE_HEADER) # override the `construct` method so that we can run custom transformations. # this is necessary as we don't want to do any actual runtime type checking # (which means we can't use validators) but we do want to ensure that `NotGiven` # values are not present # # type ignore required because we're adding explicit types to `**values` @classmethod def construct( # type: ignore cls, _fields_set: set[str] | None = None, **values: Unpack[FinalRequestOptionsInput], ) -> FinalRequestOptions: kwargs: dict[str, Any] = { # we unconditionally call `strip_not_given` on any value # as it will just ignore any non-mapping types key: strip_not_given(value) for key, value in values.items() } if PYDANTIC_V2: return super().model_construct(_fields_set, **kwargs) return cast(FinalRequestOptions, super().construct(_fields_set, **kwargs)) # pyright: ignore[reportDeprecated] if not TYPE_CHECKING: # type checkers incorrectly complain about this assignment model_construct = construct openai-python-1.12.0/src/openai/_module_client.py000066400000000000000000000043501456126711000217700ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import override from . 
import resources, _load_client from ._utils import LazyProxy class ChatProxy(LazyProxy[resources.Chat]): @override def __load__(self) -> resources.Chat: return _load_client().chat class BetaProxy(LazyProxy[resources.Beta]): @override def __load__(self) -> resources.Beta: return _load_client().beta class FilesProxy(LazyProxy[resources.Files]): @override def __load__(self) -> resources.Files: return _load_client().files class AudioProxy(LazyProxy[resources.Audio]): @override def __load__(self) -> resources.Audio: return _load_client().audio class ImagesProxy(LazyProxy[resources.Images]): @override def __load__(self) -> resources.Images: return _load_client().images class ModelsProxy(LazyProxy[resources.Models]): @override def __load__(self) -> resources.Models: return _load_client().models class EmbeddingsProxy(LazyProxy[resources.Embeddings]): @override def __load__(self) -> resources.Embeddings: return _load_client().embeddings class CompletionsProxy(LazyProxy[resources.Completions]): @override def __load__(self) -> resources.Completions: return _load_client().completions class ModerationsProxy(LazyProxy[resources.Moderations]): @override def __load__(self) -> resources.Moderations: return _load_client().moderations class FineTuningProxy(LazyProxy[resources.FineTuning]): @override def __load__(self) -> resources.FineTuning: return _load_client().fine_tuning chat: resources.Chat = ChatProxy().__as_proxied__() beta: resources.Beta = BetaProxy().__as_proxied__() files: resources.Files = FilesProxy().__as_proxied__() audio: resources.Audio = AudioProxy().__as_proxied__() images: resources.Images = ImagesProxy().__as_proxied__() models: resources.Models = ModelsProxy().__as_proxied__() embeddings: resources.Embeddings = EmbeddingsProxy().__as_proxied__() completions: resources.Completions = CompletionsProxy().__as_proxied__() moderations: resources.Moderations = ModerationsProxy().__as_proxied__() fine_tuning: resources.FineTuning = FineTuningProxy().__as_proxied__() openai-python-1.12.0/src/openai/_qs.py000066400000000000000000000113561456126711000175740ustar00rootroot00000000000000from __future__ import annotations from typing import Any, List, Tuple, Union, Mapping, TypeVar from urllib.parse import parse_qs, urlencode from typing_extensions import Literal, get_args from ._types import NOT_GIVEN, NotGiven, NotGivenOr from ._utils import flatten _T = TypeVar("_T") ArrayFormat = Literal["comma", "repeat", "indices", "brackets"] NestedFormat = Literal["dots", "brackets"] PrimitiveData = Union[str, int, float, bool, None] # this should be Data = Union[PrimitiveData, "List[Data]", "Tuple[Data]", "Mapping[str, Data]"] # https://github.com/microsoft/pyright/issues/3555 Data = Union[PrimitiveData, List[Any], Tuple[Any], "Mapping[str, Any]"] Params = Mapping[str, Data] class Querystring: array_format: ArrayFormat nested_format: NestedFormat def __init__( self, *, array_format: ArrayFormat = "repeat", nested_format: NestedFormat = "brackets", ) -> None: self.array_format = array_format self.nested_format = nested_format def parse(self, query: str) -> Mapping[str, object]: # Note: custom format syntax is not supported yet return parse_qs(query) def stringify( self, params: Params, *, array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, ) -> str: return urlencode( self.stringify_items( params, array_format=array_format, nested_format=nested_format, ) ) def stringify_items( self, params: Params, *, array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, 
nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, ) -> list[tuple[str, str]]: opts = Options( qs=self, array_format=array_format, nested_format=nested_format, ) return flatten([self._stringify_item(key, value, opts) for key, value in params.items()]) def _stringify_item( self, key: str, value: Data, opts: Options, ) -> list[tuple[str, str]]: if isinstance(value, Mapping): items: list[tuple[str, str]] = [] nested_format = opts.nested_format for subkey, subvalue in value.items(): items.extend( self._stringify_item( # TODO: error if unknown format f"{key}.{subkey}" if nested_format == "dots" else f"{key}[{subkey}]", subvalue, opts, ) ) return items if isinstance(value, (list, tuple)): array_format = opts.array_format if array_format == "comma": return [ ( key, ",".join(self._primitive_value_to_str(item) for item in value if item is not None), ), ] elif array_format == "repeat": items = [] for item in value: items.extend(self._stringify_item(key, item, opts)) return items elif array_format == "indices": raise NotImplementedError("The array indices format is not supported yet") elif array_format == "brackets": items = [] key = key + "[]" for item in value: items.extend(self._stringify_item(key, item, opts)) return items else: raise NotImplementedError( f"Unknown array_format value: {array_format}, choose from {', '.join(get_args(ArrayFormat))}" ) serialised = self._primitive_value_to_str(value) if not serialised: return [] return [(key, serialised)] def _primitive_value_to_str(self, value: PrimitiveData) -> str: # copied from httpx if value is True: return "true" elif value is False: return "false" elif value is None: return "" return str(value) _qs = Querystring() parse = _qs.parse stringify = _qs.stringify stringify_items = _qs.stringify_items class Options: array_format: ArrayFormat nested_format: NestedFormat def __init__( self, qs: Querystring = _qs, *, array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN, nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN, ) -> None: self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format openai-python-1.12.0/src/openai/_resource.py000066400000000000000000000020561456126711000207750ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import time import asyncio from typing import TYPE_CHECKING if TYPE_CHECKING: from ._client import OpenAI, AsyncOpenAI class SyncAPIResource: _client: OpenAI def __init__(self, client: OpenAI) -> None: self._client = client self._get = client.get self._post = client.post self._patch = client.patch self._put = client.put self._delete = client.delete self._get_api_list = client.get_api_list def _sleep(self, seconds: float) -> None: time.sleep(seconds) class AsyncAPIResource: _client: AsyncOpenAI def __init__(self, client: AsyncOpenAI) -> None: self._client = client self._get = client.get self._post = client.post self._patch = client.patch self._put = client.put self._delete = client.delete self._get_api_list = client.get_api_list async def _sleep(self, seconds: float) -> None: await asyncio.sleep(seconds) openai-python-1.12.0/src/openai/_response.py000066400000000000000000000664651456126711000210220ustar00rootroot00000000000000from __future__ import annotations import os import inspect import logging import datetime import functools from types import TracebackType from typing import ( TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, Iterator, AsyncIterator, cast, overload, ) from typing_extensions import Awaitable, ParamSpec, override, get_origin import anyio import httpx import pydantic from ._types import NoneType from ._utils import is_given, extract_type_var_from_base from ._models import BaseModel, is_basemodel from ._constants import RAW_RESPONSE_HEADER, OVERRIDE_CAST_TO_HEADER from ._streaming import Stream, AsyncStream, is_stream_class_type, extract_stream_chunk_type from ._exceptions import OpenAIError, APIResponseValidationError if TYPE_CHECKING: from ._models import FinalRequestOptions from ._base_client import BaseClient P = ParamSpec("P") R = TypeVar("R") _T = TypeVar("_T") _APIResponseT = TypeVar("_APIResponseT", bound="APIResponse[Any]") _AsyncAPIResponseT = TypeVar("_AsyncAPIResponseT", bound="AsyncAPIResponse[Any]") log: logging.Logger = logging.getLogger(__name__) class BaseAPIResponse(Generic[R]): _cast_to: type[R] _client: BaseClient[Any, Any] _parsed_by_type: dict[type[Any], Any] _is_sse_stream: bool _stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None _options: FinalRequestOptions http_response: httpx.Response def __init__( self, *, raw: httpx.Response, cast_to: type[R], client: BaseClient[Any, Any], stream: bool, stream_cls: type[Stream[Any]] | type[AsyncStream[Any]] | None, options: FinalRequestOptions, ) -> None: self._cast_to = cast_to self._client = client self._parsed_by_type = {} self._is_sse_stream = stream self._stream_cls = stream_cls self._options = options self.http_response = raw @property def headers(self) -> httpx.Headers: return self.http_response.headers @property def http_request(self) -> httpx.Request: """Returns the httpx Request instance associated with the current response.""" return self.http_response.request @property def status_code(self) -> int: return self.http_response.status_code @property def url(self) -> httpx.URL: """Returns the URL for which the request was made.""" return self.http_response.url @property def method(self) -> str: return self.http_request.method @property def http_version(self) -> str: return self.http_response.http_version @property def elapsed(self) -> datetime.timedelta: """The time taken for the complete request/response cycle to complete.""" return self.http_response.elapsed @property def is_closed(self) -> bool: """Whether or not the response body has been closed. 
If this is False then there is response data that has not been read yet. You must either fully consume the response body or call `.close()` before discarding the response to prevent resource leaks. """ return self.http_response.is_closed @override def __repr__(self) -> str: return ( f"<{self.__class__.__name__} [{self.status_code} {self.http_response.reason_phrase}] type={self._cast_to}>" ) def _parse(self, *, to: type[_T] | None = None) -> R | _T: if self._is_sse_stream: if to: if not is_stream_class_type(to): raise TypeError(f"Expected custom parse type to be a subclass of {Stream} or {AsyncStream}") return cast( _T, to( cast_to=extract_stream_chunk_type( to, failure_message="Expected custom stream type to be passed with a type argument, e.g. Stream[ChunkType]", ), response=self.http_response, client=cast(Any, self._client), ), ) if self._stream_cls: return cast( R, self._stream_cls( cast_to=extract_stream_chunk_type(self._stream_cls), response=self.http_response, client=cast(Any, self._client), ), ) stream_cls = cast("type[Stream[Any]] | type[AsyncStream[Any]] | None", self._client._default_stream_cls) if stream_cls is None: raise MissingStreamClassError() return cast( R, stream_cls( cast_to=self._cast_to, response=self.http_response, client=cast(Any, self._client), ), ) cast_to = to if to is not None else self._cast_to if cast_to is NoneType: return cast(R, None) response = self.http_response if cast_to == str: return cast(R, response.text) if cast_to == bytes: return cast(R, response.content) origin = get_origin(cast_to) or cast_to # handle the legacy binary response case if inspect.isclass(cast_to) and cast_to.__name__ == "HttpxBinaryResponseContent": return cast(R, cast_to(response)) # type: ignore if origin == APIResponse: raise RuntimeError("Unexpected state - cast_to is `APIResponse`") if inspect.isclass(origin) and issubclass(origin, httpx.Response): # Because of the invariance of our ResponseT TypeVar, users can subclass httpx.Response # and pass that class to our request functions. We cannot change the variance to be either # covariant or contravariant as that makes our usage of ResponseT illegal. We could construct # the response class ourselves but that is something that should be supported directly in httpx # as it would be easy to incorrectly construct the Response object due to the multitude of arguments. if cast_to != httpx.Response: raise ValueError(f"Subclasses of httpx.Response cannot be passed to `cast_to`") return cast(R, response) if inspect.isclass(origin) and not issubclass(origin, BaseModel) and issubclass(origin, pydantic.BaseModel): raise TypeError("Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`") if ( cast_to is not object and not origin is list and not origin is dict and not origin is Union and not issubclass(origin, BaseModel) ): raise RuntimeError( f"Unsupported type, expected {cast_to} to be a subclass of {BaseModel}, {dict}, {list}, {Union}, {NoneType}, {str} or {httpx.Response}." ) # split is required to handle cases where additional information is included # in the response, e.g. 
application/json; charset=utf-8 content_type, *_ = response.headers.get("content-type", "*").split(";") if content_type != "application/json": if is_basemodel(cast_to): try: data = response.json() except Exception as exc: log.debug("Could not read JSON from response data due to %s - %s", type(exc), exc) else: return self._client._process_response_data( data=data, cast_to=cast_to, # type: ignore response=response, ) if self._client._strict_response_validation: raise APIResponseValidationError( response=response, message=f"Expected Content-Type response header to be `application/json` but received `{content_type}` instead.", body=response.text, ) # If the API responds with content that isn't JSON then we just return # the (decoded) text without performing any parsing so that you can still # handle the response however you need to. return response.text # type: ignore data = response.json() return self._client._process_response_data( data=data, cast_to=cast_to, # type: ignore response=response, ) class APIResponse(BaseAPIResponse[R]): @overload def parse(self, *, to: type[_T]) -> _T: ... @overload def parse(self) -> R: ... def parse(self, *, to: type[_T] | None = None) -> R | _T: """Returns the rich python representation of this response's data. For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. You can customise the type that the response is parsed into through the `to` argument, e.g. ```py from openai import BaseModel class MyModel(BaseModel): foo: str obj = response.parse(to=MyModel) print(obj.foo) ``` We support parsing: - `BaseModel` - `dict` - `list` - `Union` - `str` - `httpx.Response` """ cache_key = to if to is not None else self._cast_to cached = self._parsed_by_type.get(cache_key) if cached is not None: return cached # type: ignore[no-any-return] if not self._is_sse_stream: self.read() parsed = self._parse(to=to) if is_given(self._options.post_parser): parsed = self._options.post_parser(parsed) self._parsed_by_type[cache_key] = parsed return parsed def read(self) -> bytes: """Read and return the binary response content.""" try: return self.http_response.read() except httpx.StreamConsumed as exc: # The default error raised by httpx isn't very # helpful in our case so we re-raise it with # a different error message. raise StreamAlreadyConsumed() from exc def text(self) -> str: """Read and decode the response content into a string.""" self.read() return self.http_response.text def json(self) -> object: """Read and decode the JSON response content.""" self.read() return self.http_response.json() def close(self) -> None: """Close the response and release the connection. Automatically called if the response body is read to completion. """ self.http_response.close() def iter_bytes(self, chunk_size: int | None = None) -> Iterator[bytes]: """ A byte-iterator over the decoded response content. This automatically handles gzip, deflate and brotli encoded responses. """ for chunk in self.http_response.iter_bytes(chunk_size): yield chunk def iter_text(self, chunk_size: int | None = None) -> Iterator[str]: """A str-iterator over the decoded response content that handles both gzip, deflate, etc but also detects the content's string encoding. """ for chunk in self.http_response.iter_text(chunk_size): yield chunk def iter_lines(self) -> Iterator[str]: """Like `iter_text()` but will only yield chunks for each line""" for chunk in self.http_response.iter_lines(): yield chunk class AsyncAPIResponse(BaseAPIResponse[R]): @overload async def parse(self, *, to: type[_T]) -> _T: ... 
@overload async def parse(self) -> R: ... async def parse(self, *, to: type[_T] | None = None) -> R | _T: """Returns the rich python representation of this response's data. For lower-level control, see `.read()`, `.json()`, `.iter_bytes()`. You can customise the type that the response is parsed into through the `to` argument, e.g. ```py from openai import BaseModel class MyModel(BaseModel): foo: str obj = response.parse(to=MyModel) print(obj.foo) ``` We support parsing: - `BaseModel` - `dict` - `list` - `Union` - `str` - `httpx.Response` """ cache_key = to if to is not None else self._cast_to cached = self._parsed_by_type.get(cache_key) if cached is not None: return cached # type: ignore[no-any-return] if not self._is_sse_stream: await self.read() parsed = self._parse(to=to) if is_given(self._options.post_parser): parsed = self._options.post_parser(parsed) self._parsed_by_type[cache_key] = parsed return parsed async def read(self) -> bytes: """Read and return the binary response content.""" try: return await self.http_response.aread() except httpx.StreamConsumed as exc: # the default error raised by httpx isn't very # helpful in our case so we re-raise it with # a different error message raise StreamAlreadyConsumed() from exc async def text(self) -> str: """Read and decode the response content into a string.""" await self.read() return self.http_response.text async def json(self) -> object: """Read and decode the JSON response content.""" await self.read() return self.http_response.json() async def close(self) -> None: """Close the response and release the connection. Automatically called if the response body is read to completion. """ await self.http_response.aclose() async def iter_bytes(self, chunk_size: int | None = None) -> AsyncIterator[bytes]: """ A byte-iterator over the decoded response content. This automatically handles gzip, deflate and brotli encoded responses. """ async for chunk in self.http_response.aiter_bytes(chunk_size): yield chunk async def iter_text(self, chunk_size: int | None = None) -> AsyncIterator[str]: """A str-iterator over the decoded response content that handles both gzip, deflate, etc but also detects the content's string encoding. """ async for chunk in self.http_response.aiter_text(chunk_size): yield chunk async def iter_lines(self) -> AsyncIterator[str]: """Like `iter_text()` but will only yield chunks for each line""" async for chunk in self.http_response.aiter_lines(): yield chunk class BinaryAPIResponse(APIResponse[bytes]): """Subclass of APIResponse providing helpers for dealing with binary data. Note: If you want to stream the response data instead of eagerly reading it all at once then you should use `.with_streaming_response` when making the API request, e.g. `.with_streaming_response.get_binary_response()` """ def write_to_file( self, file: str | os.PathLike[str], ) -> None: """Write the output to the given file. Accepts a filename or any path-like object, e.g. pathlib.Path Note: if you want to stream the data to the file instead of writing all at once then you should use `.with_streaming_response` when making the API request, e.g. `.with_streaming_response.get_binary_response()` """ with open(file, mode="wb") as f: for data in self.iter_bytes(): f.write(data) class AsyncBinaryAPIResponse(AsyncAPIResponse[bytes]): """Subclass of APIResponse providing helpers for dealing with binary data. 
Note: If you want to stream the response data instead of eagerly reading it all at once then you should use `.with_streaming_response` when making the API request, e.g. `.with_streaming_response.get_binary_response()` """ async def write_to_file( self, file: str | os.PathLike[str], ) -> None: """Write the output to the given file. Accepts a filename or any path-like object, e.g. pathlib.Path Note: if you want to stream the data to the file instead of writing all at once then you should use `.with_streaming_response` when making the API request, e.g. `.with_streaming_response.get_binary_response()` """ path = anyio.Path(file) async with await path.open(mode="wb") as f: async for data in self.iter_bytes(): await f.write(data) class StreamedBinaryAPIResponse(APIResponse[bytes]): def stream_to_file( self, file: str | os.PathLike[str], *, chunk_size: int | None = None, ) -> None: """Streams the output to the given file. Accepts a filename or any path-like object, e.g. pathlib.Path """ with open(file, mode="wb") as f: for data in self.iter_bytes(chunk_size): f.write(data) class AsyncStreamedBinaryAPIResponse(AsyncAPIResponse[bytes]): async def stream_to_file( self, file: str | os.PathLike[str], *, chunk_size: int | None = None, ) -> None: """Streams the output to the given file. Accepts a filename or any path-like object, e.g. pathlib.Path """ path = anyio.Path(file) async with await path.open(mode="wb") as f: async for data in self.iter_bytes(chunk_size): await f.write(data) class MissingStreamClassError(TypeError): def __init__(self) -> None: super().__init__( "The `stream` argument was set to `True` but the `stream_cls` argument was not given. See `openai._streaming` for reference", ) class StreamAlreadyConsumed(OpenAIError): """ Attempted to read or stream content, but the content has already been streamed. This can happen if you use a method like `.iter_lines()` and then attempt to read the entire response body afterwards, e.g. ```py response = await client.post(...) async for line in response.iter_lines(): ... # do something with `line` content = await response.read() # ^ error ``` If you want this behaviour you'll need to either manually accumulate the response content or call `await response.read()` before iterating over the stream. """ def __init__(self) -> None: message = ( "Attempted to read or stream some content, but the content has " "already been streamed. " "This could be due to attempting to stream the response " "content more than once." "\n\n" "You can fix this by manually accumulating the response content while streaming " "or by calling `.read()` before starting to stream."
) super().__init__(message) class ResponseContextManager(Generic[_APIResponseT]): """Context manager for ensuring that a request is not made until it is entered and that the response will always be closed when the context manager exits """ def __init__(self, request_func: Callable[[], _APIResponseT]) -> None: self._request_func = request_func self.__response: _APIResponseT | None = None def __enter__(self) -> _APIResponseT: self.__response = self._request_func() return self.__response def __exit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: if self.__response is not None: self.__response.close() class AsyncResponseContextManager(Generic[_AsyncAPIResponseT]): """Context manager for ensuring that a request is not made until it is entered and that the response will always be closed when the context manager exits """ def __init__(self, api_request: Awaitable[_AsyncAPIResponseT]) -> None: self._api_request = api_request self.__response: _AsyncAPIResponseT | None = None async def __aenter__(self) -> _AsyncAPIResponseT: self.__response = await self._api_request return self.__response async def __aexit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: if self.__response is not None: await self.__response.close() def to_streamed_response_wrapper(func: Callable[P, R]) -> Callable[P, ResponseContextManager[APIResponse[R]]]: """Higher order function that takes one of our bound API methods and wraps it to support streaming and returning the raw `APIResponse` object directly. """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[APIResponse[R]]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "stream" kwargs["extra_headers"] = extra_headers make_request = functools.partial(func, *args, **kwargs) return ResponseContextManager(cast(Callable[[], APIResponse[R]], make_request)) return wrapped def async_to_streamed_response_wrapper( func: Callable[P, Awaitable[R]], ) -> Callable[P, AsyncResponseContextManager[AsyncAPIResponse[R]]]: """Higher order function that takes one of our bound API methods and wraps it to support streaming and returning the raw `APIResponse` object directly. """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[AsyncAPIResponse[R]]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "stream" kwargs["extra_headers"] = extra_headers make_request = func(*args, **kwargs) return AsyncResponseContextManager(cast(Awaitable[AsyncAPIResponse[R]], make_request)) return wrapped def to_custom_streamed_response_wrapper( func: Callable[P, object], response_cls: type[_APIResponseT], ) -> Callable[P, ResponseContextManager[_APIResponseT]]: """Higher order function that takes one of our bound API methods and an `APIResponse` class and wraps the method to support streaming and returning the given response class directly. Note: the given `response_cls` *must* be concrete, e.g. 
`class BinaryAPIResponse(APIResponse[bytes])` """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> ResponseContextManager[_APIResponseT]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "stream" extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls kwargs["extra_headers"] = extra_headers make_request = functools.partial(func, *args, **kwargs) return ResponseContextManager(cast(Callable[[], _APIResponseT], make_request)) return wrapped def async_to_custom_streamed_response_wrapper( func: Callable[P, Awaitable[object]], response_cls: type[_AsyncAPIResponseT], ) -> Callable[P, AsyncResponseContextManager[_AsyncAPIResponseT]]: """Higher order function that takes one of our bound API methods and an `APIResponse` class and wraps the method to support streaming and returning the given response class directly. Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])` """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncResponseContextManager[_AsyncAPIResponseT]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "stream" extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls kwargs["extra_headers"] = extra_headers make_request = func(*args, **kwargs) return AsyncResponseContextManager(cast(Awaitable[_AsyncAPIResponseT], make_request)) return wrapped def to_raw_response_wrapper(func: Callable[P, R]) -> Callable[P, APIResponse[R]]: """Higher order function that takes one of our bound API methods and wraps it to support returning the raw `APIResponse` object directly. """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> APIResponse[R]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "raw" kwargs["extra_headers"] = extra_headers return cast(APIResponse[R], func(*args, **kwargs)) return wrapped def async_to_raw_response_wrapper(func: Callable[P, Awaitable[R]]) -> Callable[P, Awaitable[AsyncAPIResponse[R]]]: """Higher order function that takes one of our bound API methods and wraps it to support returning the raw `APIResponse` object directly. """ @functools.wraps(func) async def wrapped(*args: P.args, **kwargs: P.kwargs) -> AsyncAPIResponse[R]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "raw" kwargs["extra_headers"] = extra_headers return cast(AsyncAPIResponse[R], await func(*args, **kwargs)) return wrapped def to_custom_raw_response_wrapper( func: Callable[P, object], response_cls: type[_APIResponseT], ) -> Callable[P, _APIResponseT]: """Higher order function that takes one of our bound API methods and an `APIResponse` class and wraps the method to support returning the given response class directly. Note: the given `response_cls` *must* be concrete, e.g. 
`class BinaryAPIResponse(APIResponse[bytes])` """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> _APIResponseT: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "raw" extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls kwargs["extra_headers"] = extra_headers return cast(_APIResponseT, func(*args, **kwargs)) return wrapped def async_to_custom_raw_response_wrapper( func: Callable[P, Awaitable[object]], response_cls: type[_AsyncAPIResponseT], ) -> Callable[P, Awaitable[_AsyncAPIResponseT]]: """Higher order function that takes one of our bound API methods and an `APIResponse` class and wraps the method to support returning the given response class directly. Note: the given `response_cls` *must* be concrete, e.g. `class BinaryAPIResponse(APIResponse[bytes])` """ @functools.wraps(func) def wrapped(*args: P.args, **kwargs: P.kwargs) -> Awaitable[_AsyncAPIResponseT]: extra_headers = {**(cast(Any, kwargs.get("extra_headers")) or {})} extra_headers[RAW_RESPONSE_HEADER] = "raw" extra_headers[OVERRIDE_CAST_TO_HEADER] = response_cls kwargs["extra_headers"] = extra_headers return cast(Awaitable[_AsyncAPIResponseT], func(*args, **kwargs)) return wrapped def extract_response_type(typ: type[BaseAPIResponse[Any]]) -> type: """Given a type like `APIResponse[T]`, returns the generic type variable `T`. This also handles the case where a concrete subclass is given, e.g. ```py class MyResponse(APIResponse[bytes]): ... extract_response_type(MyResponse) -> bytes ``` """ return extract_type_var_from_base( typ, generic_bases=cast("tuple[type, ...]", (BaseAPIResponse, APIResponse, AsyncAPIResponse)), index=0, ) openai-python-1.12.0/src/openai/_streaming.py000066400000000000000000000215121456126711000211350ustar00rootroot00000000000000# Note: initially copied from https://github.com/florimondmanca/httpx-sse/blob/master/src/httpx_sse/_decoders.py from __future__ import annotations import json import inspect from types import TracebackType from typing import TYPE_CHECKING, Any, Generic, TypeVar, Iterator, AsyncIterator, cast from typing_extensions import Self, TypeGuard, override, get_origin import httpx from ._utils import is_mapping, extract_type_var_from_base from ._exceptions import APIError if TYPE_CHECKING: from ._client import OpenAI, AsyncOpenAI _T = TypeVar("_T") class Stream(Generic[_T]): """Provides the core interface to iterate over a synchronous stream response.""" response: httpx.Response def __init__( self, *, cast_to: type[_T], response: httpx.Response, client: OpenAI, ) -> None: self.response = response self._cast_to = cast_to self._client = client self._decoder = SSEDecoder() self._iterator = self.__stream__() def __next__(self) -> _T: return self._iterator.__next__() def __iter__(self) -> Iterator[_T]: for item in self._iterator: yield item def _iter_events(self) -> Iterator[ServerSentEvent]: yield from self._decoder.iter(self.response.iter_lines()) def __stream__(self) -> Iterator[_T]: cast_to = cast(Any, self._cast_to) response = self.response process_data = self._client._process_response_data iterator = self._iter_events() for sse in iterator: if sse.data.startswith("[DONE]"): break if sse.event is None: data = sse.json() if is_mapping(data) and data.get("error"): raise APIError( message="An error occurred during streaming", request=self.response.request, body=data["error"], ) yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed for _sse in iterator: ... 
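# --- Illustrative usage (a hedged sketch, not part of the class above) ---
# A `Stream` is normally constructed for you when `stream=True` is passed to a
# create call and is then consumed like any other iterator; the model name and
# prompt below are placeholders:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     stream = client.chat.completions.create(
#         model="gpt-3.5-turbo",  # placeholder model name
#         messages=[{"role": "user", "content": "Say hello"}],
#         stream=True,
#     )
#     for chunk in stream:
#         print(chunk.choices[0].delta.content or "", end="")
#
# Each yielded chunk has already been decoded from an SSE `data:` payload by
# `__stream__` above, and iteration stops once the `[DONE]` sentinel is received.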
def __enter__(self) -> Self: return self def __exit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: self.close() def close(self) -> None: """ Close the response and release the connection. Automatically called if the response body is read to completion. """ self.response.close() class AsyncStream(Generic[_T]): """Provides the core interface to iterate over an asynchronous stream response.""" response: httpx.Response def __init__( self, *, cast_to: type[_T], response: httpx.Response, client: AsyncOpenAI, ) -> None: self.response = response self._cast_to = cast_to self._client = client self._decoder = SSEDecoder() self._iterator = self.__stream__() async def __anext__(self) -> _T: return await self._iterator.__anext__() async def __aiter__(self) -> AsyncIterator[_T]: async for item in self._iterator: yield item async def _iter_events(self) -> AsyncIterator[ServerSentEvent]: async for sse in self._decoder.aiter(self.response.aiter_lines()): yield sse async def __stream__(self) -> AsyncIterator[_T]: cast_to = cast(Any, self._cast_to) response = self.response process_data = self._client._process_response_data iterator = self._iter_events() async for sse in iterator: if sse.data.startswith("[DONE]"): break if sse.event is None: data = sse.json() if is_mapping(data) and data.get("error"): raise APIError( message="An error occurred during streaming", request=self.response.request, body=data["error"], ) yield process_data(data=data, cast_to=cast_to, response=response) # Ensure the entire stream is consumed async for _sse in iterator: ... async def __aenter__(self) -> Self: return self async def __aexit__( self, exc_type: type[BaseException] | None, exc: BaseException | None, exc_tb: TracebackType | None, ) -> None: await self.close() async def close(self) -> None: """ Close the response and release the connection. Automatically called if the response body is read to completion. 
""" await self.response.aclose() class ServerSentEvent: def __init__( self, *, event: str | None = None, data: str | None = None, id: str | None = None, retry: int | None = None, ) -> None: if data is None: data = "" self._id = id self._data = data self._event = event or None self._retry = retry @property def event(self) -> str | None: return self._event @property def id(self) -> str | None: return self._id @property def retry(self) -> int | None: return self._retry @property def data(self) -> str: return self._data def json(self) -> Any: return json.loads(self.data) @override def __repr__(self) -> str: return f"ServerSentEvent(event={self.event}, data={self.data}, id={self.id}, retry={self.retry})" class SSEDecoder: _data: list[str] _event: str | None _retry: int | None _last_event_id: str | None def __init__(self) -> None: self._event = None self._data = [] self._last_event_id = None self._retry = None def iter(self, iterator: Iterator[str]) -> Iterator[ServerSentEvent]: """Given an iterator that yields lines, iterate over it & yield every event encountered""" for line in iterator: line = line.rstrip("\n") sse = self.decode(line) if sse is not None: yield sse async def aiter(self, iterator: AsyncIterator[str]) -> AsyncIterator[ServerSentEvent]: """Given an async iterator that yields lines, iterate over it & yield every event encountered""" async for line in iterator: line = line.rstrip("\n") sse = self.decode(line) if sse is not None: yield sse def decode(self, line: str) -> ServerSentEvent | None: # See: https://html.spec.whatwg.org/multipage/server-sent-events.html#event-stream-interpretation # noqa: E501 if not line: if not self._event and not self._data and not self._last_event_id and self._retry is None: return None sse = ServerSentEvent( event=self._event, data="\n".join(self._data), id=self._last_event_id, retry=self._retry, ) # NOTE: as per the SSE spec, do not reset last_event_id. self._event = None self._data = [] self._retry = None return sse if line.startswith(":"): return None fieldname, _, value = line.partition(":") if value.startswith(" "): value = value[1:] if fieldname == "event": self._event = value elif fieldname == "data": self._data.append(value) elif fieldname == "id": if "\0" in value: pass else: self._last_event_id = value elif fieldname == "retry": try: self._retry = int(value) except (TypeError, ValueError): pass else: pass # Field is ignored. return None def is_stream_class_type(typ: type) -> TypeGuard[type[Stream[object]] | type[AsyncStream[object]]]: """TypeGuard for determining whether or not the given type is a subclass of `Stream` / `AsyncStream`""" origin = get_origin(typ) or typ return inspect.isclass(origin) and issubclass(origin, (Stream, AsyncStream)) def extract_stream_chunk_type( stream_cls: type, *, failure_message: str | None = None, ) -> type: """Given a type like `Stream[T]`, returns the generic type variable `T`. This also handles the case where a concrete subclass is given, e.g. ```py class MyStream(Stream[bytes]): ... 
extract_stream_chunk_type(MyStream) -> bytes ``` """ from ._base_client import Stream, AsyncStream return extract_type_var_from_base( stream_cls, index=0, generic_bases=cast("tuple[type, ...]", (Stream, AsyncStream)), failure_message=failure_message, ) openai-python-1.12.0/src/openai/_types.py000066400000000000000000000137531456126711000203200ustar00rootroot00000000000000from __future__ import annotations from os import PathLike from typing import ( IO, TYPE_CHECKING, Any, Dict, List, Type, Tuple, Union, Mapping, TypeVar, Callable, Optional, Sequence, ) from typing_extensions import Literal, Protocol, TypeAlias, TypedDict, override, runtime_checkable import httpx import pydantic from httpx import URL, Proxy, Timeout, Response, BaseTransport, AsyncBaseTransport if TYPE_CHECKING: from ._models import BaseModel from ._response import APIResponse, AsyncAPIResponse from ._legacy_response import HttpxBinaryResponseContent Transport = BaseTransport AsyncTransport = AsyncBaseTransport Query = Mapping[str, object] Body = object AnyMapping = Mapping[str, object] ModelT = TypeVar("ModelT", bound=pydantic.BaseModel) _T = TypeVar("_T") # Approximates httpx internal ProxiesTypes and RequestFiles types # while adding support for `PathLike` instances ProxiesDict = Dict["str | URL", Union[None, str, URL, Proxy]] ProxiesTypes = Union[str, Proxy, ProxiesDict] if TYPE_CHECKING: FileContent = Union[IO[bytes], bytes, PathLike[str]] else: FileContent = Union[IO[bytes], bytes, PathLike] # PathLike is not subscriptable in Python 3.8. FileTypes = Union[ # file (or bytes) FileContent, # (filename, file (or bytes)) Tuple[Optional[str], FileContent], # (filename, file (or bytes), content_type) Tuple[Optional[str], FileContent, Optional[str]], # (filename, file (or bytes), content_type, headers) Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]], ] RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]] # duplicate of the above but without our custom file support HttpxFileContent = Union[IO[bytes], bytes] HttpxFileTypes = Union[ # file (or bytes) HttpxFileContent, # (filename, file (or bytes)) Tuple[Optional[str], HttpxFileContent], # (filename, file (or bytes), content_type) Tuple[Optional[str], HttpxFileContent, Optional[str]], # (filename, file (or bytes), content_type, headers) Tuple[Optional[str], HttpxFileContent, Optional[str], Mapping[str, str]], ] HttpxRequestFiles = Union[Mapping[str, HttpxFileTypes], Sequence[Tuple[str, HttpxFileTypes]]] # Workaround to support (cast_to: Type[ResponseT]) -> ResponseT # where ResponseT includes `None`. In order to support directly # passing `None`, overloads would have to be defined for every # method that uses `ResponseT` which would lead to an unacceptable # amount of code duplication and make it unreadable. See _base_client.py # for example usage. # # This unfortunately means that you will either have # to import this type and pass it explicitly: # # from openai import NoneType # client.get('/foo', cast_to=NoneType) # # or build it yourself: # # client.get('/foo', cast_to=type(None)) if TYPE_CHECKING: NoneType: Type[None] else: NoneType = type(None) class RequestOptions(TypedDict, total=False): headers: Headers max_retries: int timeout: float | Timeout | None params: Query extra_json: AnyMapping idempotency_key: str # Sentinel class used until PEP 0661 is accepted class NotGiven: """ A sentinel singleton class used to distinguish omitted keyword arguments from those passed in with the value None (which may have different behavior). 
For example: ```py def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ... get(timeout=1) # 1s timeout get(timeout=None) # No timeout get() # Default timeout behavior, which may not be statically known at the method definition. ``` """ def __bool__(self) -> Literal[False]: return False @override def __repr__(self) -> str: return "NOT_GIVEN" NotGivenOr = Union[_T, NotGiven] NOT_GIVEN = NotGiven() class Omit: """In certain situations you need to be able to represent a case where a default value has to be explicitly removed and `None` is not an appropriate substitute, for example: ```py # as the default `Content-Type` header is `application/json` that will be sent client.post("/upload/files", files={"file": b"my raw file content"}) # you can't explicitly override the header as it has to be dynamically generated # to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983' client.post(..., headers={"Content-Type": "multipart/form-data"}) # instead you can remove the default `application/json` header by passing Omit client.post(..., headers={"Content-Type": Omit()}) ``` """ def __bool__(self) -> Literal[False]: return False @runtime_checkable class ModelBuilderProtocol(Protocol): @classmethod def build( cls: type[_T], *, response: Response, data: object, ) -> _T: ... Headers = Mapping[str, Union[str, Omit]] class HeadersLikeProtocol(Protocol): def get(self, __key: str) -> str | None: ... HeadersLike = Union[Headers, HeadersLikeProtocol] ResponseT = TypeVar( "ResponseT", bound=Union[ object, str, None, "BaseModel", List[Any], Dict[str, Any], Response, ModelBuilderProtocol, "APIResponse[Any]", "AsyncAPIResponse[Any]", "HttpxBinaryResponseContent", ], ) StrBytesIntFloat = Union[str, bytes, int, float] # Note: copied from Pydantic # https://github.com/pydantic/pydantic/blob/32ea570bf96e84234d2992e1ddf40ab8a565925a/pydantic/main.py#L49 IncEx: TypeAlias = "set[int] | set[str] | dict[int, Any] | dict[str, Any] | None" PostParser = Callable[[Any], Any] @runtime_checkable class InheritsGeneric(Protocol): """Represents a type that has inherited from `Generic` The `__orig_bases__` property can be used to determine the resolved type variable for a given base class. 
""" __orig_bases__: tuple[_GenericAlias] class _GenericAlias(Protocol): __origin__: type[object] class HttpxSendArgs(TypedDict, total=False): auth: httpx.Auth openai-python-1.12.0/src/openai/_utils/000077500000000000000000000000001456126711000177315ustar00rootroot00000000000000openai-python-1.12.0/src/openai/_utils/__init__.py000066400000000000000000000032771456126711000220530ustar00rootroot00000000000000from ._sync import asyncify as asyncify from ._proxy import LazyProxy as LazyProxy from ._utils import ( flatten as flatten, is_dict as is_dict, is_list as is_list, is_given as is_given, is_tuple as is_tuple, is_mapping as is_mapping, is_tuple_t as is_tuple_t, parse_date as parse_date, is_iterable as is_iterable, is_sequence as is_sequence, coerce_float as coerce_float, is_mapping_t as is_mapping_t, removeprefix as removeprefix, removesuffix as removesuffix, extract_files as extract_files, is_sequence_t as is_sequence_t, required_args as required_args, coerce_boolean as coerce_boolean, coerce_integer as coerce_integer, file_from_path as file_from_path, parse_datetime as parse_datetime, strip_not_given as strip_not_given, deepcopy_minimal as deepcopy_minimal, get_async_library as get_async_library, maybe_coerce_float as maybe_coerce_float, get_required_header as get_required_header, maybe_coerce_boolean as maybe_coerce_boolean, maybe_coerce_integer as maybe_coerce_integer, ) from ._typing import ( is_list_type as is_list_type, is_union_type as is_union_type, extract_type_arg as extract_type_arg, is_iterable_type as is_iterable_type, is_required_type as is_required_type, is_annotated_type as is_annotated_type, strip_annotated_type as strip_annotated_type, extract_type_var_from_base as extract_type_var_from_base, ) from ._streams import consume_sync_iterator as consume_sync_iterator, consume_async_iterator as consume_async_iterator from ._transform import ( PropertyInfo as PropertyInfo, transform as transform, maybe_transform as maybe_transform, ) openai-python-1.12.0/src/openai/_utils/_logs.py000066400000000000000000000014061456126711000214070ustar00rootroot00000000000000import os import logging logger: logging.Logger = logging.getLogger("openai") httpx_logger: logging.Logger = logging.getLogger("httpx") def _basic_config() -> None: # e.g. [2023-10-05 14:12:26 - openai._base_client:818 - DEBUG] HTTP Request: POST http://127.0.0.1:4010/foo/bar "200 OK" logging.basicConfig( format="[%(asctime)s - %(name)s:%(lineno)d - %(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) def setup_logging() -> None: env = os.environ.get("OPENAI_LOG") if env == "debug": _basic_config() logger.setLevel(logging.DEBUG) httpx_logger.setLevel(logging.DEBUG) elif env == "info": _basic_config() logger.setLevel(logging.INFO) httpx_logger.setLevel(logging.INFO) openai-python-1.12.0/src/openai/_utils/_proxy.py000066400000000000000000000035421456126711000216270ustar00rootroot00000000000000from __future__ import annotations from abc import ABC, abstractmethod from typing import Generic, TypeVar, Iterable, cast from typing_extensions import override T = TypeVar("T") class LazyProxy(Generic[T], ABC): """Implements data methods to pretend that an instance is another instance. This includes forwarding attribute access and othe methods. """ # Note: we have to special case proxies that themselves return proxies # to support using a proxy as a catch-all for any random access, e.g. 
`proxy.foo.bar.baz` def __getattr__(self, attr: str) -> object: proxied = self.__get_proxied__() if isinstance(proxied, LazyProxy): return proxied # pyright: ignore return getattr(proxied, attr) @override def __repr__(self) -> str: proxied = self.__get_proxied__() if isinstance(proxied, LazyProxy): return proxied.__class__.__name__ return repr(self.__get_proxied__()) @override def __str__(self) -> str: proxied = self.__get_proxied__() if isinstance(proxied, LazyProxy): return proxied.__class__.__name__ return str(proxied) @override def __dir__(self) -> Iterable[str]: proxied = self.__get_proxied__() if isinstance(proxied, LazyProxy): return [] return proxied.__dir__() @property # type: ignore @override def __class__(self) -> type: proxied = self.__get_proxied__() if issubclass(type(proxied), LazyProxy): return type(proxied) return proxied.__class__ def __get_proxied__(self) -> T: return self.__load__() def __as_proxied__(self) -> T: """Helper method that returns the current proxy, typed as the loaded object""" return cast(T, self) @abstractmethod def __load__(self) -> T: ... openai-python-1.12.0/src/openai/_utils/_streams.py000066400000000000000000000004411456126711000221170ustar00rootroot00000000000000from typing import Any from typing_extensions import Iterator, AsyncIterator def consume_sync_iterator(iterator: Iterator[Any]) -> None: for _ in iterator: ... async def consume_async_iterator(iterator: AsyncIterator[Any]) -> None: async for _ in iterator: ... openai-python-1.12.0/src/openai/_utils/_sync.py000066400000000000000000000043351456126711000214230ustar00rootroot00000000000000from __future__ import annotations import functools from typing import TypeVar, Callable, Awaitable from typing_extensions import ParamSpec import anyio import anyio.to_thread T_Retval = TypeVar("T_Retval") T_ParamSpec = ParamSpec("T_ParamSpec") # copied from `asyncer`, https://github.com/tiangolo/asyncer def asyncify( function: Callable[T_ParamSpec, T_Retval], *, cancellable: bool = False, limiter: anyio.CapacityLimiter | None = None, ) -> Callable[T_ParamSpec, Awaitable[T_Retval]]: """ Take a blocking function and create an async one that receives the same positional and keyword arguments, and that when called, calls the original function in a worker thread using `anyio.to_thread.run_sync()`. Internally, `asyncer.asyncify()` uses the same `anyio.to_thread.run_sync()`, but it supports keyword arguments additional to positional arguments and it adds better support for autocompletion and inline errors for the arguments of the function called and the return value. If the `cancellable` option is enabled and the task waiting for its completion is cancelled, the thread will still run its course but its return value (or any raised exception) will be ignored. Use it like this: ```Python def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: # Do work return "Some result" result = await to_thread.asyncify(do_work)("spam", "ham", kwarg1="a", kwarg2="b") print(result) ``` ## Arguments `function`: a blocking regular callable (e.g. a function) `cancellable`: `True` to allow cancellation of the operation `limiter`: capacity limiter to use to limit the total amount of threads running (if omitted, the default limiter is used) ## Return An async function that takes the same positional and keyword arguments as the original one, that when called runs the same original function in a thread worker and returns the result. 
""" async def wrapper(*args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs) -> T_Retval: partial_f = functools.partial(function, *args, **kwargs) return await anyio.to_thread.run_sync(partial_f, cancellable=cancellable, limiter=limiter) return wrapper openai-python-1.12.0/src/openai/_utils/_transform.py000066400000000000000000000160671456126711000224670ustar00rootroot00000000000000from __future__ import annotations from typing import Any, Mapping, TypeVar, cast from datetime import date, datetime from typing_extensions import Literal, get_args, override, get_type_hints import pydantic from ._utils import ( is_list, is_mapping, is_iterable, ) from ._typing import ( is_list_type, is_union_type, extract_type_arg, is_iterable_type, is_required_type, is_annotated_type, strip_annotated_type, ) from .._compat import model_dump, is_typeddict _T = TypeVar("_T") # TODO: support for drilling globals() and locals() # TODO: ensure works correctly with forward references in all cases PropertyFormat = Literal["iso8601", "custom"] class PropertyInfo: """Metadata class to be used in Annotated types to provide information about a given type. For example: class MyParams(TypedDict): account_holder_name: Annotated[str, PropertyInfo(alias='accountHolderName')] This means that {'account_holder_name': 'Robert'} will be transformed to {'accountHolderName': 'Robert'} before being sent to the API. """ alias: str | None format: PropertyFormat | None format_template: str | None def __init__( self, *, alias: str | None = None, format: PropertyFormat | None = None, format_template: str | None = None, ) -> None: self.alias = alias self.format = format self.format_template = format_template @override def __repr__(self) -> str: return f"{self.__class__.__name__}(alias='{self.alias}', format={self.format}, format_template='{self.format_template}')" def maybe_transform( data: object, expected_type: object, ) -> Any | None: """Wrapper over `transform()` that allows `None` to be passed. See `transform()` for more details. """ if data is None: return None return transform(data, expected_type) # Wrapper over _transform_recursive providing fake types def transform( data: _T, expected_type: object, ) -> _T: """Transform dictionaries based off of type information from the given type, for example: ```py class Params(TypedDict, total=False): card_id: Required[Annotated[str, PropertyInfo(alias="cardID")]] transformed = transform({"card_id": ""}, Params) # {'cardID': ''} ``` Any keys / data that does not have type information given will be included as is. It should be noted that the transformations that this function does are not represented in the type system. """ transformed = _transform_recursive(data, annotation=cast(type, expected_type)) return cast(_T, transformed) def _get_annotated_type(type_: type) -> type | None: """If the given type is an `Annotated` type then it is returned, if not `None` is returned. This also unwraps the type when applicable, e.g. `Required[Annotated[T, ...]]` """ if is_required_type(type_): # Unwrap `Required[Annotated[T, ...]]` to `Annotated[T, ...]` type_ = get_args(type_)[0] if is_annotated_type(type_): return type_ return None def _maybe_transform_key(key: str, type_: type) -> str: """Transform the given `data` based on the annotations provided in `type_`. Note: this function only looks at `Annotated` types that contain `PropertInfo` metadata. 
""" annotated_type = _get_annotated_type(type_) if annotated_type is None: # no `Annotated` definition for this type, no transformation needed return key # ignore the first argument as it is the actual type annotations = get_args(annotated_type)[1:] for annotation in annotations: if isinstance(annotation, PropertyInfo) and annotation.alias is not None: return annotation.alias return key def _transform_recursive( data: object, *, annotation: type, inner_type: type | None = None, ) -> object: """Transform the given data against the expected type. Args: annotation: The direct type annotation given to the particular piece of data. This may or may not be wrapped in metadata types, e.g. `Required[T]`, `Annotated[T, ...]` etc inner_type: If applicable, this is the "inside" type. This is useful in certain cases where the outside type is a container type such as `List[T]`. In that case `inner_type` should be set to `T` so that each entry in the list can be transformed using the metadata from the container type. Defaults to the same value as the `annotation` argument. """ if inner_type is None: inner_type = annotation stripped_type = strip_annotated_type(inner_type) if is_typeddict(stripped_type) and is_mapping(data): return _transform_typeddict(data, stripped_type) if ( # List[T] (is_list_type(stripped_type) and is_list(data)) # Iterable[T] or (is_iterable_type(stripped_type) and is_iterable(data) and not isinstance(data, str)) ): inner_type = extract_type_arg(stripped_type, 0) return [_transform_recursive(d, annotation=annotation, inner_type=inner_type) for d in data] if is_union_type(stripped_type): # For union types we run the transformation against all subtypes to ensure that everything is transformed. # # TODO: there may be edge cases where the same normalized field name will transform to two different names # in different subtypes. 
for subtype in get_args(stripped_type): data = _transform_recursive(data, annotation=annotation, inner_type=subtype) return data if isinstance(data, pydantic.BaseModel): return model_dump(data, exclude_unset=True) return _transform_value(data, annotation) def _transform_value(data: object, type_: type) -> object: annotated_type = _get_annotated_type(type_) if annotated_type is None: return data # ignore the first argument as it is the actual type annotations = get_args(annotated_type)[1:] for annotation in annotations: if isinstance(annotation, PropertyInfo) and annotation.format is not None: return _format_data(data, annotation.format, annotation.format_template) return data def _format_data(data: object, format_: PropertyFormat, format_template: str | None) -> object: if isinstance(data, (date, datetime)): if format_ == "iso8601": return data.isoformat() if format_ == "custom" and format_template is not None: return data.strftime(format_template) return data def _transform_typeddict( data: Mapping[str, object], expected_type: type, ) -> Mapping[str, object]: result: dict[str, object] = {} annotations = get_type_hints(expected_type, include_extras=True) for key, value in data.items(): type_ = annotations.get(key) if type_ is None: # we do not have a type annotation for this field, leave it as is result[key] = value else: result[_maybe_transform_key(key, type_)] = _transform_recursive(value, annotation=type_) return result openai-python-1.12.0/src/openai/_utils/_typing.py000066400000000000000000000073761456126711000217710ustar00rootroot00000000000000from __future__ import annotations from typing import Any, TypeVar, Iterable, cast from collections import abc as _c_abc from typing_extensions import Required, Annotated, get_args, get_origin from .._types import InheritsGeneric from .._compat import is_union as _is_union def is_annotated_type(typ: type) -> bool: return get_origin(typ) == Annotated def is_list_type(typ: type) -> bool: return (get_origin(typ) or typ) == list def is_iterable_type(typ: type) -> bool: """If the given type is `typing.Iterable[T]`""" origin = get_origin(typ) or typ return origin == Iterable or origin == _c_abc.Iterable def is_union_type(typ: type) -> bool: return _is_union(get_origin(typ)) def is_required_type(typ: type) -> bool: return get_origin(typ) == Required def is_typevar(typ: type) -> bool: # type ignore is required because type checkers # think this expression will always return False return type(typ) == TypeVar # type: ignore # Extracts T from Annotated[T, ...] or from Required[Annotated[T, ...]] def strip_annotated_type(typ: type) -> type: if is_required_type(typ) or is_annotated_type(typ): return strip_annotated_type(cast(type, get_args(typ)[0])) return typ def extract_type_arg(typ: type, index: int) -> type: args = get_args(typ) try: return cast(type, args[index]) except IndexError as err: raise RuntimeError(f"Expected type {typ} to have a type argument at index {index} but it did not") from err def extract_type_var_from_base( typ: type, *, generic_bases: tuple[type, ...], index: int, failure_message: str | None = None, ) -> type: """Given a type like `Foo[T]`, returns the generic type variable `T`. This also handles the case where a concrete subclass is given, e.g. ```py class MyResponse(Foo[bytes]): ... extract_type_var(MyResponse, bases=(Foo,), index=0) -> bytes ``` And where a generic subclass is given: ```py _T = TypeVar('_T') class MyResponse(Foo[_T]): ... 
extract_type_var(MyResponse[bytes], bases=(Foo,), index=0) -> bytes ``` """ cls = cast(object, get_origin(typ) or typ) if cls in generic_bases: # we're given the class directly return extract_type_arg(typ, index) # if a subclass is given # --- # this is needed as __orig_bases__ is not present in the typeshed stubs # because it is intended to be for internal use only, however there does # not seem to be a way to resolve generic TypeVars for inherited subclasses # without using it. if isinstance(cls, InheritsGeneric): target_base_class: Any | None = None for base in cls.__orig_bases__: if base.__origin__ in generic_bases: target_base_class = base break if target_base_class is None: raise RuntimeError( "Could not find the generic base class;\n" "This should never happen;\n" f"Does {cls} inherit from one of {generic_bases} ?" ) extracted = extract_type_arg(target_base_class, index) if is_typevar(extracted): # If the extracted type argument is itself a type variable # then that means the subclass itself is generic, so we have # to resolve the type argument from the class itself, not # the base class. # # Note: if there is more than 1 type argument, the subclass could # change the ordering of the type arguments, this is not currently # supported. return extract_type_arg(typ, index) return extracted raise RuntimeError(failure_message or f"Could not resolve inner type variable at index {index} for {typ}") openai-python-1.12.0/src/openai/_utils/_utils.py000066400000000000000000000255411456126711000216110ustar00rootroot00000000000000from __future__ import annotations import os import re import inspect import functools from typing import ( Any, Tuple, Mapping, TypeVar, Callable, Iterable, Sequence, cast, overload, ) from pathlib import Path from typing_extensions import TypeGuard import sniffio from .._types import Headers, NotGiven, FileTypes, NotGivenOr, HeadersLike from .._compat import parse_date as parse_date, parse_datetime as parse_datetime _T = TypeVar("_T") _TupleT = TypeVar("_TupleT", bound=Tuple[object, ...]) _MappingT = TypeVar("_MappingT", bound=Mapping[str, object]) _SequenceT = TypeVar("_SequenceT", bound=Sequence[object]) CallableT = TypeVar("CallableT", bound=Callable[..., Any]) def flatten(t: Iterable[Iterable[_T]]) -> list[_T]: return [item for sublist in t for item in sublist] def extract_files( # TODO: this needs to take Dict but variance issues..... # create protocol type ? query: Mapping[str, object], *, paths: Sequence[Sequence[str]], ) -> list[tuple[str, FileTypes]]: """Recursively extract files from the given dictionary based on specified paths. A path may look like this ['foo', 'files', '', 'data']. Note: this mutates the given dictionary. """ files: list[tuple[str, FileTypes]] = [] for path in paths: files.extend(_extract_items(query, path, index=0, flattened_key=None)) return files def _extract_items( obj: object, path: Sequence[str], *, index: int, flattened_key: str | None, ) -> list[tuple[str, FileTypes]]: try: key = path[index] except IndexError: if isinstance(obj, NotGiven): # no value was provided - we can safely ignore return [] # cyclical import from .._files import assert_is_file_content # We have exhausted the path, return the entry we found. 
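        # Illustrative example (hedged, hypothetical payload): for a request body like
        #     {"image": {"data": b"..."}} with paths=[["image", "data"]]
        # the recursion bottoms out here with flattened_key == "image[data]" and `obj`
        # holding the raw file content, which is returned as a single
        # ("image[data]", <file>) tuple ready for multipart encoding.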
assert_is_file_content(obj, key=flattened_key) assert flattened_key is not None return [(flattened_key, cast(FileTypes, obj))] index += 1 if is_dict(obj): try: # We are at the last entry in the path so we must remove the field if (len(path)) == index: item = obj.pop(key) else: item = obj[key] except KeyError: # Key was not present in the dictionary, this is not indicative of an error # as the given path may not point to a required field. We also do not want # to enforce required fields as the API may differ from the spec in some cases. return [] if flattened_key is None: flattened_key = key else: flattened_key += f"[{key}]" return _extract_items( item, path, index=index, flattened_key=flattened_key, ) elif is_list(obj): if key != "": return [] return flatten( [ _extract_items( item, path, index=index, flattened_key=flattened_key + "[]" if flattened_key is not None else "[]", ) for item in obj ] ) # Something unexpected was passed, just ignore it. return [] def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]: return not isinstance(obj, NotGiven) # Type safe methods for narrowing types with TypeVars. # The default narrowing for isinstance(obj, dict) is dict[unknown, unknown], # however this cause Pyright to rightfully report errors. As we know we don't # care about the contained types we can safely use `object` in it's place. # # There are two separate functions defined, `is_*` and `is_*_t` for different use cases. # `is_*` is for when you're dealing with an unknown input # `is_*_t` is for when you're narrowing a known union type to a specific subset def is_tuple(obj: object) -> TypeGuard[tuple[object, ...]]: return isinstance(obj, tuple) def is_tuple_t(obj: _TupleT | object) -> TypeGuard[_TupleT]: return isinstance(obj, tuple) def is_sequence(obj: object) -> TypeGuard[Sequence[object]]: return isinstance(obj, Sequence) def is_sequence_t(obj: _SequenceT | object) -> TypeGuard[_SequenceT]: return isinstance(obj, Sequence) def is_mapping(obj: object) -> TypeGuard[Mapping[str, object]]: return isinstance(obj, Mapping) def is_mapping_t(obj: _MappingT | object) -> TypeGuard[_MappingT]: return isinstance(obj, Mapping) def is_dict(obj: object) -> TypeGuard[dict[object, object]]: return isinstance(obj, dict) def is_list(obj: object) -> TypeGuard[list[object]]: return isinstance(obj, list) def is_iterable(obj: object) -> TypeGuard[Iterable[object]]: return isinstance(obj, Iterable) def deepcopy_minimal(item: _T) -> _T: """Minimal reimplementation of copy.deepcopy() that will only copy certain object types: - mappings, e.g. `dict` - list This is done for performance reasons. """ if is_mapping(item): return cast(_T, {k: deepcopy_minimal(v) for k, v in item.items()}) if is_list(item): return cast(_T, [deepcopy_minimal(entry) for entry in item]) return item # copied from https://github.com/Rapptz/RoboDanny def human_join(seq: Sequence[str], *, delim: str = ", ", final: str = "or") -> str: size = len(seq) if size == 0: return "" if size == 1: return seq[0] if size == 2: return f"{seq[0]} {final} {seq[1]}" return delim.join(seq[:-1]) + f" {final} {seq[-1]}" def quote(string: str) -> str: """Add single quotation marks around the given string. Does *not* do any escaping.""" return f"'{string}'" def required_args(*variants: Sequence[str]) -> Callable[[CallableT], CallableT]: """Decorator to enforce a given set of arguments or variants of arguments are passed to the decorated function. Useful for enforcing runtime validation of overloaded functions. Example usage: ```py @overload def foo(*, a: str) -> str: ... 
@overload def foo(*, b: bool) -> str: ... # This enforces the same constraints that a static type checker would # i.e. that either a or b must be passed to the function @required_args(["a"], ["b"]) def foo(*, a: str | None = None, b: bool | None = None) -> str: ... ``` """ def inner(func: CallableT) -> CallableT: params = inspect.signature(func).parameters positional = [ name for name, param in params.items() if param.kind in { param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD, } ] @functools.wraps(func) def wrapper(*args: object, **kwargs: object) -> object: given_params: set[str] = set() for i, _ in enumerate(args): try: given_params.add(positional[i]) except IndexError: raise TypeError( f"{func.__name__}() takes {len(positional)} argument(s) but {len(args)} were given" ) from None for key in kwargs.keys(): given_params.add(key) for variant in variants: matches = all((param in given_params for param in variant)) if matches: break else: # no break if len(variants) > 1: variations = human_join( ["(" + human_join([quote(arg) for arg in variant], final="and") + ")" for variant in variants] ) msg = f"Missing required arguments; Expected either {variations} arguments to be given" else: # TODO: this error message is not deterministic missing = list(set(variants[0]) - given_params) if len(missing) > 1: msg = f"Missing required arguments: {human_join([quote(arg) for arg in missing])}" else: msg = f"Missing required argument: {quote(missing[0])}" raise TypeError(msg) return func(*args, **kwargs) return wrapper # type: ignore return inner _K = TypeVar("_K") _V = TypeVar("_V") @overload def strip_not_given(obj: None) -> None: ... @overload def strip_not_given(obj: Mapping[_K, _V | NotGiven]) -> dict[_K, _V]: ... @overload def strip_not_given(obj: object) -> object: ... def strip_not_given(obj: object | None) -> object: """Remove all top-level keys where their values are instances of `NotGiven`""" if obj is None: return None if not is_mapping(obj): return obj return {key: value for key, value in obj.items() if not isinstance(value, NotGiven)} def coerce_integer(val: str) -> int: return int(val, base=10) def coerce_float(val: str) -> float: return float(val) def coerce_boolean(val: str) -> bool: return val == "true" or val == "1" or val == "on" def maybe_coerce_integer(val: str | None) -> int | None: if val is None: return None return coerce_integer(val) def maybe_coerce_float(val: str | None) -> float | None: if val is None: return None return coerce_float(val) def maybe_coerce_boolean(val: str | None) -> bool | None: if val is None: return None return coerce_boolean(val) def removeprefix(string: str, prefix: str) -> str: """Remove a prefix from a string. Backport of `str.removeprefix` for Python < 3.9 """ if string.startswith(prefix): return string[len(prefix) :] return string def removesuffix(string: str, suffix: str) -> str: """Remove a suffix from a string. 
Backport of `str.removesuffix` for Python < 3.9 """ if string.endswith(suffix): return string[: -len(suffix)] return string def file_from_path(path: str) -> FileTypes: contents = Path(path).read_bytes() file_name = os.path.basename(path) return (file_name, contents) def get_required_header(headers: HeadersLike, header: str) -> str: lower_header = header.lower() if isinstance(headers, Mapping): headers = cast(Headers, headers) for k, v in headers.items(): if k.lower() == lower_header and isinstance(v, str): return v """ to deal with the case where the header looks like Stainless-Event-Id """ intercaps_header = re.sub(r"([^\w])(\w)", lambda pat: pat.group(1) + pat.group(2).upper(), header.capitalize()) for normalized_header in [header, lower_header, header.upper(), intercaps_header]: value = headers.get(normalized_header) if value: return value raise ValueError(f"Could not find {header} header") def get_async_library() -> str: try: return sniffio.current_async_library() except Exception: return "false" openai-python-1.12.0/src/openai/_version.py000066400000000000000000000001761456126711000206340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. __title__ = "openai" __version__ = "1.12.0" # x-release-please-version openai-python-1.12.0/src/openai/cli/000077500000000000000000000000001456126711000172015ustar00rootroot00000000000000openai-python-1.12.0/src/openai/cli/__init__.py000066400000000000000000000000371456126711000213120ustar00rootroot00000000000000from ._cli import main as main openai-python-1.12.0/src/openai/cli/_api/000077500000000000000000000000001456126711000201115ustar00rootroot00000000000000openai-python-1.12.0/src/openai/cli/_api/__init__.py000066400000000000000000000000721456126711000222210ustar00rootroot00000000000000from ._main import register_commands as register_commands openai-python-1.12.0/src/openai/cli/_api/_main.py000066400000000000000000000007031456126711000215460ustar00rootroot00000000000000from __future__ import annotations from argparse import ArgumentParser from . 
import chat, audio, files, image, models, completions def register_commands(parser: ArgumentParser) -> None: subparsers = parser.add_subparsers(help="All API subcommands") chat.register(subparsers) image.register(subparsers) audio.register(subparsers) files.register(subparsers) models.register(subparsers) completions.register(subparsers) openai-python-1.12.0/src/openai/cli/_api/audio.py000066400000000000000000000064231456126711000215710ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any, Optional, cast from argparse import ArgumentParser from .._utils import get_client, print_model from ..._types import NOT_GIVEN from .._models import BaseModel from .._progress import BufferReader if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: # transcriptions sub = subparser.add_parser("audio.transcriptions.create") # Required sub.add_argument("-m", "--model", type=str, default="whisper-1") sub.add_argument("-f", "--file", type=str, required=True) # Optional sub.add_argument("--response-format", type=str) sub.add_argument("--language", type=str) sub.add_argument("-t", "--temperature", type=float) sub.add_argument("--prompt", type=str) sub.set_defaults(func=CLIAudio.transcribe, args_model=CLITranscribeArgs) # translations sub = subparser.add_parser("audio.translations.create") # Required sub.add_argument("-f", "--file", type=str, required=True) # Optional sub.add_argument("-m", "--model", type=str, default="whisper-1") sub.add_argument("--response-format", type=str) # TODO: doesn't seem to be supported by the API # sub.add_argument("--language", type=str) sub.add_argument("-t", "--temperature", type=float) sub.add_argument("--prompt", type=str) sub.set_defaults(func=CLIAudio.translate, args_model=CLITranslationArgs) class CLITranscribeArgs(BaseModel): model: str file: str response_format: Optional[str] = None language: Optional[str] = None temperature: Optional[float] = None prompt: Optional[str] = None class CLITranslationArgs(BaseModel): model: str file: str response_format: Optional[str] = None language: Optional[str] = None temperature: Optional[float] = None prompt: Optional[str] = None class CLIAudio: @staticmethod def transcribe(args: CLITranscribeArgs) -> None: with open(args.file, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") model = get_client().audio.transcriptions.create( file=(args.file, buffer_reader), model=args.model, language=args.language or NOT_GIVEN, temperature=args.temperature or NOT_GIVEN, prompt=args.prompt or NOT_GIVEN, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat response_format=cast(Any, args.response_format), ) print_model(model) @staticmethod def translate(args: CLITranslationArgs) -> None: with open(args.file, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") model = get_client().audio.translations.create( file=(args.file, buffer_reader), model=args.model, temperature=args.temperature or NOT_GIVEN, prompt=args.prompt or NOT_GIVEN, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat response_format=cast(Any, args.response_format), ) print_model(model) 
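# --- Illustrative CLI usage (hedged; the exact entry point is wired up elsewhere
# in the package, so treat these invocations as a sketch) ---
#
#     openai api audio.transcriptions.create -m whisper-1 -f ./meeting.mp3
#     openai api audio.translations.create -f ./interview.mp3 --response-format text
#
# Each flag corresponds to an `add_argument` call in `register()` above and is
# parsed into the `CLITranscribeArgs` / `CLITranslationArgs` models before the
# request is made through `get_client()`.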
openai-python-1.12.0/src/openai/cli/_api/chat/000077500000000000000000000000001456126711000210305ustar00rootroot00000000000000openai-python-1.12.0/src/openai/cli/_api/chat/__init__.py000066400000000000000000000004541456126711000231440ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING from argparse import ArgumentParser from . import completions if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: completions.register(subparser) openai-python-1.12.0/src/openai/cli/_api/chat/completions.py000066400000000000000000000123631456126711000237430ustar00rootroot00000000000000from __future__ import annotations import sys from typing import TYPE_CHECKING, List, Optional, cast from argparse import ArgumentParser from typing_extensions import Literal, NamedTuple from ..._utils import get_client from ..._models import BaseModel from ...._streaming import Stream from ....types.chat import ( ChatCompletionRole, ChatCompletionChunk, CompletionCreateParams, ) from ....types.chat.completion_create_params import ( CompletionCreateParamsStreaming, CompletionCreateParamsNonStreaming, ) if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("chat.completions.create") sub._action_groups.pop() req = sub.add_argument_group("required arguments") opt = sub.add_argument_group("optional arguments") req.add_argument( "-g", "--message", action="append", nargs=2, metavar=("ROLE", "CONTENT"), help="A message in `{role} {content}` format. Use this argument multiple times to add multiple messages.", required=True, ) req.add_argument( "-m", "--model", help="The model to use.", required=True, ) opt.add_argument( "-n", "--n", help="How many completions to generate for the conversation.", type=int, ) opt.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate.", type=int) opt.add_argument( "-t", "--temperature", help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. Mutually exclusive with `top_p`.""", type=float, ) opt.add_argument( "-P", "--top_p", help="""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered. 
Mutually exclusive with `temperature`.""", type=float, ) opt.add_argument( "--stop", help="A stop sequence at which to stop generating tokens for the message.", ) opt.add_argument("--stream", help="Stream messages as they're ready.", action="store_true") sub.set_defaults(func=CLIChatCompletion.create, args_model=CLIChatCompletionCreateArgs) class CLIMessage(NamedTuple): role: ChatCompletionRole content: str class CLIChatCompletionCreateArgs(BaseModel): message: List[CLIMessage] model: str n: Optional[int] = None max_tokens: Optional[int] = None temperature: Optional[float] = None top_p: Optional[float] = None stop: Optional[str] = None stream: bool = False class CLIChatCompletion: @staticmethod def create(args: CLIChatCompletionCreateArgs) -> None: params: CompletionCreateParams = { "model": args.model, "messages": [ {"role": cast(Literal["user"], message.role), "content": message.content} for message in args.message ], "n": args.n, "temperature": args.temperature, "top_p": args.top_p, "stop": args.stop, # type checkers are not good at inferring union types so we have to set stream afterwards "stream": False, } if args.stream: params["stream"] = args.stream # type: ignore if args.max_tokens is not None: params["max_tokens"] = args.max_tokens if args.stream: return CLIChatCompletion._stream_create(cast(CompletionCreateParamsStreaming, params)) return CLIChatCompletion._create(cast(CompletionCreateParamsNonStreaming, params)) @staticmethod def _create(params: CompletionCreateParamsNonStreaming) -> None: completion = get_client().chat.completions.create(**params) should_print_header = len(completion.choices) > 1 for choice in completion.choices: if should_print_header: sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index)) content = choice.message.content if choice.message.content is not None else "None" sys.stdout.write(content) if should_print_header or not content.endswith("\n"): sys.stdout.write("\n") sys.stdout.flush() @staticmethod def _stream_create(params: CompletionCreateParamsStreaming) -> None: # cast is required for mypy stream = cast( # pyright: ignore[reportUnnecessaryCast] Stream[ChatCompletionChunk], get_client().chat.completions.create(**params) ) for chunk in stream: should_print_header = len(chunk.choices) > 1 for choice in chunk.choices: if should_print_header: sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index)) content = choice.delta.content or "" sys.stdout.write(content) if should_print_header: sys.stdout.write("\n") sys.stdout.flush() sys.stdout.write("\n") openai-python-1.12.0/src/openai/cli/_api/completions.py000066400000000000000000000144141456126711000230230ustar00rootroot00000000000000from __future__ import annotations import sys from typing import TYPE_CHECKING, Optional, cast from argparse import ArgumentParser from functools import partial from openai.types.completion import Completion from .._utils import get_client from ..._types import NOT_GIVEN, NotGivenOr from ..._utils import is_given from .._errors import CLIError from .._models import BaseModel from ..._streaming import Stream if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("completions.create") # Required sub.add_argument( "-m", "--model", help="The model to use", required=True, ) # Optional sub.add_argument("-p", "--prompt", help="An optional prompt to complete from") sub.add_argument("--stream", help="Stream tokens as they're ready.", action="store_true") 
sub.add_argument("-M", "--max-tokens", help="The maximum number of tokens to generate", type=int) sub.add_argument( "-t", "--temperature", help="""What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. Mutually exclusive with `top_p`.""", type=float, ) sub.add_argument( "-P", "--top_p", help="""An alternative to sampling with temperature, called nucleus sampling, where the considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%% probability mass are considered. Mutually exclusive with `temperature`.""", type=float, ) sub.add_argument( "-n", "--n", help="How many sub-completions to generate for each prompt.", type=int, ) sub.add_argument( "--logprobs", help="Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. So for example, if `logprobs` is 10, the API will return a list of the 10 most likely tokens. If `logprobs` is 0, only the chosen tokens will have logprobs returned.", type=int, ) sub.add_argument( "--best_of", help="Generates `best_of` completions server-side and returns the 'best' (the one with the highest log probability per token). Results cannot be streamed.", type=int, ) sub.add_argument( "--echo", help="Echo back the prompt in addition to the completion", action="store_true", ) sub.add_argument( "--frequency_penalty", help="Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.", type=float, ) sub.add_argument( "--presence_penalty", help="Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.", type=float, ) sub.add_argument("--suffix", help="The suffix that comes after a completion of inserted text.") sub.add_argument("--stop", help="A stop sequence at which to stop generating tokens.") sub.add_argument( "--user", help="A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.", ) # TODO: add support for logit_bias sub.set_defaults(func=CLICompletions.create, args_model=CLICompletionCreateArgs) class CLICompletionCreateArgs(BaseModel): model: str stream: bool = False prompt: Optional[str] = None n: NotGivenOr[int] = NOT_GIVEN stop: NotGivenOr[str] = NOT_GIVEN user: NotGivenOr[str] = NOT_GIVEN echo: NotGivenOr[bool] = NOT_GIVEN suffix: NotGivenOr[str] = NOT_GIVEN best_of: NotGivenOr[int] = NOT_GIVEN top_p: NotGivenOr[float] = NOT_GIVEN logprobs: NotGivenOr[int] = NOT_GIVEN max_tokens: NotGivenOr[int] = NOT_GIVEN temperature: NotGivenOr[float] = NOT_GIVEN presence_penalty: NotGivenOr[float] = NOT_GIVEN frequency_penalty: NotGivenOr[float] = NOT_GIVEN class CLICompletions: @staticmethod def create(args: CLICompletionCreateArgs) -> None: if is_given(args.n) and args.n > 1 and args.stream: raise CLIError("Can't stream completions with n>1 with the current CLI") make_request = partial( get_client().completions.create, n=args.n, echo=args.echo, stop=args.stop, user=args.user, model=args.model, top_p=args.top_p, prompt=args.prompt, suffix=args.suffix, best_of=args.best_of, logprobs=args.logprobs, max_tokens=args.max_tokens, temperature=args.temperature, presence_penalty=args.presence_penalty, frequency_penalty=args.frequency_penalty, ) if args.stream: return CLICompletions._stream_create( # mypy doesn't understand the 
`partial` function but pyright does cast(Stream[Completion], make_request(stream=True)) # pyright: ignore[reportUnnecessaryCast] ) return CLICompletions._create(make_request()) @staticmethod def _create(completion: Completion) -> None: should_print_header = len(completion.choices) > 1 for choice in completion.choices: if should_print_header: sys.stdout.write("===== Completion {} =====\n".format(choice.index)) sys.stdout.write(choice.text) if should_print_header or not choice.text.endswith("\n"): sys.stdout.write("\n") sys.stdout.flush() @staticmethod def _stream_create(stream: Stream[Completion]) -> None: for completion in stream: should_print_header = len(completion.choices) > 1 for choice in sorted(completion.choices, key=lambda c: c.index): if should_print_header: sys.stdout.write("===== Chat Completion {} =====\n".format(choice.index)) sys.stdout.write(choice.text) if should_print_header: sys.stdout.write("\n") sys.stdout.flush() sys.stdout.write("\n") openai-python-1.12.0/src/openai/cli/_api/files.py000066400000000000000000000044511456126711000215710ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any, cast from argparse import ArgumentParser from .._utils import get_client, print_model from .._models import BaseModel from .._progress import BufferReader if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("files.create") sub.add_argument( "-f", "--file", required=True, help="File to upload", ) sub.add_argument( "-p", "--purpose", help="Why are you uploading this file? (see https://platform.openai.com/docs/api-reference/ for purposes)", required=True, ) sub.set_defaults(func=CLIFile.create, args_model=CLIFileCreateArgs) sub = subparser.add_parser("files.retrieve") sub.add_argument("-i", "--id", required=True, help="The files ID") sub.set_defaults(func=CLIFile.get, args_model=CLIFileCreateArgs) sub = subparser.add_parser("files.delete") sub.add_argument("-i", "--id", required=True, help="The files ID") sub.set_defaults(func=CLIFile.delete, args_model=CLIFileCreateArgs) sub = subparser.add_parser("files.list") sub.set_defaults(func=CLIFile.list) class CLIFileIDArgs(BaseModel): id: str class CLIFileCreateArgs(BaseModel): file: str purpose: str class CLIFile: @staticmethod def create(args: CLIFileCreateArgs) -> None: with open(args.file, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") file = get_client().files.create( file=(args.file, buffer_reader), # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat purpose=cast(Any, args.purpose), ) print_model(file) @staticmethod def get(args: CLIFileIDArgs) -> None: file = get_client().files.retrieve(file_id=args.id) print_model(file) @staticmethod def delete(args: CLIFileIDArgs) -> None: file = get_client().files.delete(file_id=args.id) print_model(file) @staticmethod def list() -> None: files = get_client().files.list() for file in files: print_model(file) openai-python-1.12.0/src/openai/cli/_api/image.py000066400000000000000000000117061456126711000215520ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any, cast from argparse import ArgumentParser from .._utils import get_client, print_model from ..._types import NOT_GIVEN, NotGiven, NotGivenOr from .._models import BaseModel from .._progress import BufferReader if TYPE_CHECKING: from argparse 
import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("images.generate") sub.add_argument("-m", "--model", type=str) sub.add_argument("-p", "--prompt", type=str, required=True) sub.add_argument("-n", "--num-images", type=int, default=1) sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") sub.add_argument("--response-format", type=str, default="url") sub.set_defaults(func=CLIImage.create, args_model=CLIImageCreateArgs) sub = subparser.add_parser("images.edit") sub.add_argument("-m", "--model", type=str) sub.add_argument("-p", "--prompt", type=str, required=True) sub.add_argument("-n", "--num-images", type=int, default=1) sub.add_argument( "-I", "--image", type=str, required=True, help="Image to modify. Should be a local path and a PNG encoded image.", ) sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") sub.add_argument("--response-format", type=str, default="url") sub.add_argument( "-M", "--mask", type=str, required=False, help="Path to a mask image. It should be the same size as the image you're editing and a RGBA PNG image. The Alpha channel acts as the mask.", ) sub.set_defaults(func=CLIImage.edit, args_model=CLIImageEditArgs) sub = subparser.add_parser("images.create_variation") sub.add_argument("-m", "--model", type=str) sub.add_argument("-n", "--num-images", type=int, default=1) sub.add_argument( "-I", "--image", type=str, required=True, help="Image to modify. Should be a local path and a PNG encoded image.", ) sub.add_argument("-s", "--size", type=str, default="1024x1024", help="Size of the output image") sub.add_argument("--response-format", type=str, default="url") sub.set_defaults(func=CLIImage.create_variation, args_model=CLIImageCreateVariationArgs) class CLIImageCreateArgs(BaseModel): prompt: str num_images: int size: str response_format: str model: NotGivenOr[str] = NOT_GIVEN class CLIImageCreateVariationArgs(BaseModel): image: str num_images: int size: str response_format: str model: NotGivenOr[str] = NOT_GIVEN class CLIImageEditArgs(BaseModel): image: str num_images: int size: str response_format: str prompt: str mask: NotGivenOr[str] = NOT_GIVEN model: NotGivenOr[str] = NOT_GIVEN class CLIImage: @staticmethod def create(args: CLIImageCreateArgs) -> None: image = get_client().images.generate( model=args.model, prompt=args.prompt, n=args.num_images, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat size=cast(Any, args.size), response_format=cast(Any, args.response_format), ) print_model(image) @staticmethod def create_variation(args: CLIImageCreateVariationArgs) -> None: with open(args.image, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Upload progress") image = get_client().images.create_variation( model=args.model, image=("image", buffer_reader), n=args.num_images, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat size=cast(Any, args.size), response_format=cast(Any, args.response_format), ) print_model(image) @staticmethod def edit(args: CLIImageEditArgs) -> None: with open(args.image, "rb") as file_reader: buffer_reader = BufferReader(file_reader.read(), desc="Image upload progress") if isinstance(args.mask, NotGiven): mask: NotGivenOr[BufferReader] = NOT_GIVEN else: with open(args.mask, "rb") as file_reader: mask = BufferReader(file_reader.read(), desc="Mask 
progress") image = get_client().images.edit( model=args.model, prompt=args.prompt, image=("image", buffer_reader), n=args.num_images, mask=("mask", mask) if not isinstance(mask, NotGiven) else mask, # casts required because the API is typed for enums # but we don't want to validate that here for forwards-compat size=cast(Any, args.size), response_format=cast(Any, args.response_format), ) print_model(image) openai-python-1.12.0/src/openai/cli/_api/models.py000066400000000000000000000024171456126711000217520ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING from argparse import ArgumentParser from .._utils import get_client, print_model from .._models import BaseModel if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("models.list") sub.set_defaults(func=CLIModels.list) sub = subparser.add_parser("models.retrieve") sub.add_argument("-i", "--id", required=True, help="The model ID") sub.set_defaults(func=CLIModels.get, args_model=CLIModelIDArgs) sub = subparser.add_parser("models.delete") sub.add_argument("-i", "--id", required=True, help="The model ID") sub.set_defaults(func=CLIModels.delete, args_model=CLIModelIDArgs) class CLIModelIDArgs(BaseModel): id: str class CLIModels: @staticmethod def get(args: CLIModelIDArgs) -> None: model = get_client().models.retrieve(model=args.id) print_model(model) @staticmethod def delete(args: CLIModelIDArgs) -> None: model = get_client().models.delete(model=args.id) print_model(model) @staticmethod def list() -> None: models = get_client().models.list() for model in models: print_model(model) openai-python-1.12.0/src/openai/cli/_cli.py000066400000000000000000000151271456126711000204670ustar00rootroot00000000000000from __future__ import annotations import sys import logging import argparse from typing import Any, List, Type, Optional from typing_extensions import ClassVar import httpx import pydantic import openai from . import _tools from .. 
import _ApiType, __version__ from ._api import register_commands from ._utils import can_use_http2 from .._types import ProxiesDict from ._errors import CLIError, display_error from .._compat import PYDANTIC_V2, ConfigDict, model_parse from .._models import BaseModel from .._exceptions import APIError logger = logging.getLogger() formatter = logging.Formatter("[%(asctime)s] %(message)s") handler = logging.StreamHandler(sys.stderr) handler.setFormatter(formatter) logger.addHandler(handler) class Arguments(BaseModel): if PYDANTIC_V2: model_config: ClassVar[ConfigDict] = ConfigDict( extra="ignore", ) else: class Config(pydantic.BaseConfig): # type: ignore extra: Any = pydantic.Extra.ignore # type: ignore verbosity: int version: Optional[str] = None api_key: Optional[str] api_base: Optional[str] organization: Optional[str] proxy: Optional[List[str]] api_type: Optional[_ApiType] = None api_version: Optional[str] = None # azure azure_endpoint: Optional[str] = None azure_ad_token: Optional[str] = None # internal, set by subparsers to parse their specific args args_model: Optional[Type[BaseModel]] = None # internal, used so that subparsers can forward unknown arguments unknown_args: List[str] = [] allow_unknown_args: bool = False def _build_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser(description=None, prog="openai") parser.add_argument( "-v", "--verbose", action="count", dest="verbosity", default=0, help="Set verbosity.", ) parser.add_argument("-b", "--api-base", help="What API base url to use.") parser.add_argument("-k", "--api-key", help="What API key to use.") parser.add_argument("-p", "--proxy", nargs="+", help="What proxy to use.") parser.add_argument( "-o", "--organization", help="Which organization to run as (will use your default organization if not specified)", ) parser.add_argument( "-t", "--api-type", type=str, choices=("openai", "azure"), help="The backend API to call, must be `openai` or `azure`", ) parser.add_argument( "--api-version", help="The Azure API version, e.g. 'https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning'", ) # azure parser.add_argument( "--azure-endpoint", help="The Azure endpoint, e.g. 
'https://endpoint.openai.azure.com'", ) parser.add_argument( "--azure-ad-token", help="A token from Azure Active Directory, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id", ) # prints the package version parser.add_argument( "-V", "--version", action="version", version="%(prog)s " + __version__, ) def help() -> None: parser.print_help() parser.set_defaults(func=help) subparsers = parser.add_subparsers() sub_api = subparsers.add_parser("api", help="Direct API calls") register_commands(sub_api) sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience") _tools.register_commands(sub_tools, subparsers) return parser def main() -> int: try: _main() except (APIError, CLIError, pydantic.ValidationError) as err: display_error(err) return 1 except KeyboardInterrupt: sys.stderr.write("\n") return 1 return 0 def _parse_args(parser: argparse.ArgumentParser) -> tuple[argparse.Namespace, Arguments, list[str]]: # argparse by default will strip out the `--` but we want to keep it for unknown arguments if "--" in sys.argv: idx = sys.argv.index("--") known_args = sys.argv[1:idx] unknown_args = sys.argv[idx:] else: known_args = sys.argv[1:] unknown_args = [] parsed, remaining_unknown = parser.parse_known_args(known_args) # append any remaining unknown arguments from the initial parsing remaining_unknown.extend(unknown_args) args = model_parse(Arguments, vars(parsed)) if not args.allow_unknown_args: # we have to parse twice to ensure any unknown arguments # result in an error if that behaviour is desired parser.parse_args() return parsed, args, remaining_unknown def _main() -> None: parser = _build_parser() parsed, args, unknown = _parse_args(parser) if args.verbosity != 0: sys.stderr.write("Warning: --verbosity isn't supported yet\n") proxies: ProxiesDict = {} if args.proxy is not None: for proxy in args.proxy: key = "https://" if proxy.startswith("https") else "http://" if key in proxies: raise CLIError(f"Multiple {key} proxies given - only the last one would be used") proxies[key] = proxy http_client = httpx.Client( proxies=proxies or None, http2=can_use_http2(), ) openai.http_client = http_client if args.organization: openai.organization = args.organization if args.api_key: openai.api_key = args.api_key if args.api_base: openai.base_url = args.api_base # azure if args.api_type is not None: openai.api_type = args.api_type if args.azure_endpoint is not None: openai.azure_endpoint = args.azure_endpoint if args.api_version is not None: openai.api_version = args.api_version if args.azure_ad_token is not None: openai.azure_ad_token = args.azure_ad_token try: if args.args_model: parsed.func( model_parse( args.args_model, { **{ # we omit None values so that they can be defaulted to `NotGiven` # and we'll strip it from the API request key: value for key, value in vars(parsed).items() if value is not None }, "unknown_args": unknown, }, ) ) else: parsed.func() finally: try: http_client.close() except Exception: pass if __name__ == "__main__": sys.exit(main()) openai-python-1.12.0/src/openai/cli/_errors.py000066400000000000000000000007371456126711000212350ustar00rootroot00000000000000from __future__ import annotations import sys import pydantic from ._utils import Colors, organization_info from .._exceptions import APIError, OpenAIError class CLIError(OpenAIError): ... class SilentCLIError(CLIError): ... 
def display_error(err: CLIError | APIError | pydantic.ValidationError) -> None: if isinstance(err, SilentCLIError): return sys.stderr.write("{}{}Error:{} {}\n".format(organization_info(), Colors.FAIL, Colors.ENDC, err)) openai-python-1.12.0/src/openai/cli/_models.py000066400000000000000000000007531456126711000212020ustar00rootroot00000000000000from typing import Any from typing_extensions import ClassVar import pydantic from .. import _models from .._compat import PYDANTIC_V2, ConfigDict class BaseModel(_models.BaseModel): if PYDANTIC_V2: model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore", arbitrary_types_allowed=True) else: class Config(pydantic.BaseConfig): # type: ignore extra: Any = pydantic.Extra.ignore # type: ignore arbitrary_types_allowed: bool = True openai-python-1.12.0/src/openai/cli/_progress.py000066400000000000000000000025761456126711000215700ustar00rootroot00000000000000from __future__ import annotations import io from typing import Callable from typing_extensions import override class CancelledError(Exception): def __init__(self, msg: str) -> None: self.msg = msg super().__init__(msg) @override def __str__(self) -> str: return self.msg __repr__ = __str__ class BufferReader(io.BytesIO): def __init__(self, buf: bytes = b"", desc: str | None = None) -> None: super().__init__(buf) self._len = len(buf) self._progress = 0 self._callback = progress(len(buf), desc=desc) def __len__(self) -> int: return self._len @override def read(self, n: int | None = -1) -> bytes: chunk = io.BytesIO.read(self, n) self._progress += len(chunk) try: self._callback(self._progress) except Exception as e: # catches exception from the callback raise CancelledError("The upload was cancelled: {}".format(e)) from e return chunk def progress(total: float, desc: str | None) -> Callable[[float], None]: import tqdm meter = tqdm.tqdm(total=total, unit_scale=True, desc=desc) def incr(progress: float) -> None: meter.n = progress if progress == total: meter.close() else: meter.refresh() return incr def MB(i: int) -> int: return int(i // 1024**2) openai-python-1.12.0/src/openai/cli/_tools/000077500000000000000000000000001456126711000205005ustar00rootroot00000000000000openai-python-1.12.0/src/openai/cli/_tools/__init__.py000066400000000000000000000000721456126711000226100ustar00rootroot00000000000000from ._main import register_commands as register_commands openai-python-1.12.0/src/openai/cli/_tools/_main.py000066400000000000000000000007231456126711000221370ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING from argparse import ArgumentParser from . 
import migrate, fine_tunes if TYPE_CHECKING: from argparse import _SubParsersAction def register_commands(parser: ArgumentParser, subparser: _SubParsersAction[ArgumentParser]) -> None: migrate.register(subparser) namespaced = parser.add_subparsers(title="Tools", help="Convenience client side tools") fine_tunes.register(namespaced) openai-python-1.12.0/src/openai/cli/_tools/fine_tunes.py000066400000000000000000000030071456126711000232110ustar00rootroot00000000000000from __future__ import annotations import sys from typing import TYPE_CHECKING from argparse import ArgumentParser from .._models import BaseModel from ...lib._validators import ( get_validators, write_out_file, read_any_format, apply_validators, apply_necessary_remediation, ) if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("fine_tunes.prepare_data") sub.add_argument( "-f", "--file", required=True, help="JSONL, JSON, CSV, TSV, TXT or XLSX file containing prompt-completion examples to be analyzed." "This should be the local file path.", ) sub.add_argument( "-q", "--quiet", required=False, action="store_true", help="Auto accepts all suggestions, without asking for user input. To be used within scripts.", ) sub.set_defaults(func=prepare_data, args_model=PrepareDataArgs) class PrepareDataArgs(BaseModel): file: str quiet: bool def prepare_data(args: PrepareDataArgs) -> None: sys.stdout.write("Analyzing...\n") fname = args.file auto_accept = args.quiet df, remediation = read_any_format(fname) apply_necessary_remediation(None, remediation) validators = get_validators() assert df is not None apply_validators( df, fname, remediation, validators, auto_accept, write_out_file_func=write_out_file, ) openai-python-1.12.0/src/openai/cli/_tools/migrate.py000066400000000000000000000114371456126711000225100ustar00rootroot00000000000000from __future__ import annotations import os import sys import json import shutil import tarfile import platform import subprocess from typing import TYPE_CHECKING, List from pathlib import Path from argparse import ArgumentParser import httpx from .._errors import CLIError, SilentCLIError from .._models import BaseModel if TYPE_CHECKING: from argparse import _SubParsersAction def register(subparser: _SubParsersAction[ArgumentParser]) -> None: sub = subparser.add_parser("migrate") sub.set_defaults(func=migrate, args_model=MigrateArgs, allow_unknown_args=True) sub = subparser.add_parser("grit") sub.set_defaults(func=grit, args_model=GritArgs, allow_unknown_args=True) class GritArgs(BaseModel): # internal unknown_args: List[str] = [] def grit(args: GritArgs) -> None: grit_path = install() try: subprocess.check_call([grit_path, *args.unknown_args]) except subprocess.CalledProcessError: # stdout and stderr are forwarded by subprocess so an error will already # have been displayed raise SilentCLIError() from None class MigrateArgs(BaseModel): # internal unknown_args: List[str] = [] def migrate(args: MigrateArgs) -> None: grit_path = install() try: subprocess.check_call([grit_path, "apply", "openai", *args.unknown_args]) except subprocess.CalledProcessError: # stdout and stderr are forwarded by subprocess so an error will already # have been displayed raise SilentCLIError() from None # handles downloading the Grit CLI until they provide their own PyPi package KEYGEN_ACCOUNT = "custodian-dev" def _cache_dir() -> Path: xdg = os.environ.get("XDG_CACHE_HOME") if xdg is not None: return Path(xdg) return Path.home() / ".cache" 
def _debug(message: str) -> None: if not os.environ.get("DEBUG"): return sys.stdout.write(f"[DEBUG]: {message}\n") def install() -> Path: """Installs the Grit CLI and returns the location of the binary""" if sys.platform == "win32": raise CLIError("Windows is not supported yet in the migration CLI") platform = "macos" if sys.platform == "darwin" else "linux" dir_name = _cache_dir() / "openai-python" install_dir = dir_name / ".install" target_dir = install_dir / "bin" target_path = target_dir / "marzano" temp_file = target_dir / "marzano.tmp" if target_path.exists(): _debug(f"{target_path} already exists") sys.stdout.flush() return target_path _debug(f"Using Grit CLI path: {target_path}") target_dir.mkdir(parents=True, exist_ok=True) if temp_file.exists(): temp_file.unlink() arch = _get_arch() _debug(f"Using architecture {arch}") file_name = f"marzano-{platform}-{arch}" meta_url = f"https://api.keygen.sh/v1/accounts/{KEYGEN_ACCOUNT}/artifacts/{file_name}" sys.stdout.write(f"Retrieving Grit CLI metadata from {meta_url}\n") with httpx.Client() as client: response = client.get(meta_url) # pyright: ignore[reportUnknownMemberType] data = response.json() errors = data.get("errors") if errors: for error in errors: sys.stdout.write(f"{error}\n") raise CLIError("Could not locate Grit CLI binary - see above errors") write_manifest(install_dir, data["data"]["relationships"]["release"]["data"]["id"]) link = data["data"]["links"]["redirect"] _debug(f"Redirect URL {link}") download_response = client.get(link) # pyright: ignore[reportUnknownMemberType] with open(temp_file, "wb") as file: for chunk in download_response.iter_bytes(): file.write(chunk) unpacked_dir = target_dir / "cli-bin" unpacked_dir.mkdir(parents=True, exist_ok=True) with tarfile.open(temp_file, "r:gz") as archive: archive.extractall(unpacked_dir) for item in unpacked_dir.iterdir(): item.rename(target_dir / item.name) shutil.rmtree(unpacked_dir) os.remove(temp_file) os.chmod(target_path, 0o755) sys.stdout.flush() return target_path def _get_arch() -> str: architecture = platform.machine().lower() # Map the architecture names to Node.js equivalents arch_map = { "x86_64": "x64", "amd64": "x64", "armv7l": "arm", "aarch64": "arm64", } return arch_map.get(architecture, architecture) def write_manifest(install_path: Path, release: str) -> None: manifest = { "installPath": str(install_path), "binaries": { "marzano": { "name": "marzano", "release": release, }, }, } manifest_path = Path(install_path) / "manifests.json" with open(manifest_path, "w") as f: json.dump(manifest, f, indent=2) openai-python-1.12.0/src/openai/cli/_utils.py000066400000000000000000000015201456126711000210500ustar00rootroot00000000000000from __future__ import annotations import sys import openai from .. 
import OpenAI, _load_client from .._compat import model_json from .._models import BaseModel class Colors: HEADER = "\033[95m" OKBLUE = "\033[94m" OKGREEN = "\033[92m" WARNING = "\033[93m" FAIL = "\033[91m" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" def get_client() -> OpenAI: return _load_client() def organization_info() -> str: organization = openai.organization if organization is not None: return "[organization={}] ".format(organization) return "" def print_model(model: BaseModel) -> None: sys.stdout.write(model_json(model, indent=2) + "\n") def can_use_http2() -> bool: try: import h2 # type: ignore # noqa except ImportError: return False return True openai-python-1.12.0/src/openai/lib/000077500000000000000000000000001456126711000172005ustar00rootroot00000000000000openai-python-1.12.0/src/openai/lib/.keep000066400000000000000000000003401456126711000201220ustar00rootroot00000000000000File generated from our OpenAPI spec by Stainless. This directory can be used to store custom files to expand the SDK. It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.openai-python-1.12.0/src/openai/lib/_old_api.py000066400000000000000000000036331456126711000213250ustar00rootroot00000000000000from __future__ import annotations from typing import TYPE_CHECKING, Any from typing_extensions import override from .._utils import LazyProxy from .._exceptions import OpenAIError INSTRUCTIONS = """ You tried to access openai.{symbol}, but this is no longer supported in openai>=1.0.0 - see the README at https://github.com/openai/openai-python for the API. You can run `openai migrate` to automatically upgrade your codebase to use the 1.0.0 interface. Alternatively, you can pin your installation to the old version, e.g. 
`pip install openai==0.28` A detailed migration guide is available here: https://github.com/openai/openai-python/discussions/742 """ class APIRemovedInV1(OpenAIError): def __init__(self, *, symbol: str) -> None: super().__init__(INSTRUCTIONS.format(symbol=symbol)) class APIRemovedInV1Proxy(LazyProxy[Any]): def __init__(self, *, symbol: str) -> None: super().__init__() self._symbol = symbol @override def __load__(self) -> Any: # return the proxy until it is eventually called so that # we don't break people that are just checking the attributes # of a module return self def __call__(self, *_args: Any, **_kwargs: Any) -> Any: raise APIRemovedInV1(symbol=self._symbol) SYMBOLS = [ "Edit", "File", "Audio", "Image", "Model", "Engine", "Customer", "FineTune", "Embedding", "Completion", "Deployment", "Moderation", "ErrorObject", "FineTuningJob", "ChatCompletion", ] # we explicitly tell type checkers that nothing is exported # from this file so that when we re-export the old symbols # in `openai/__init__.py` they aren't added to the auto-complete # suggestions given by editors if TYPE_CHECKING: __all__: list[str] = [] else: __all__ = SYMBOLS __locals = locals() for symbol in SYMBOLS: __locals[symbol] = APIRemovedInV1Proxy(symbol=symbol) openai-python-1.12.0/src/openai/lib/_validators.py000066400000000000000000001045651456126711000220740ustar00rootroot00000000000000# pyright: basic from __future__ import annotations import os import sys from typing import Any, TypeVar, Callable, Optional, NamedTuple from typing_extensions import TypeAlias from .._extras import pandas as pd class Remediation(NamedTuple): name: str immediate_msg: Optional[str] = None necessary_msg: Optional[str] = None necessary_fn: Optional[Callable[[Any], Any]] = None optional_msg: Optional[str] = None optional_fn: Optional[Callable[[Any], Any]] = None error_msg: Optional[str] = None OptionalDataFrameT = TypeVar("OptionalDataFrameT", bound="Optional[pd.DataFrame]") def num_examples_validator(df: pd.DataFrame) -> Remediation: """ This validator will only print out the number of examples and recommend to the user to increase the number of examples if less than 100. """ MIN_EXAMPLES = 100 optional_suggestion = ( "" if len(df) >= MIN_EXAMPLES else ". In general, we recommend having at least a few hundred examples. We've found that performance tends to linearly increase for every doubling of the number of examples" ) immediate_msg = f"\n- Your file contains {len(df)} prompt-completion pairs{optional_suggestion}" return Remediation(name="num_examples", immediate_msg=immediate_msg) def necessary_column_validator(df: pd.DataFrame, necessary_column: str) -> Remediation: """ This validator will ensure that the necessary column is present in the dataframe. """ def lower_case_column(df: pd.DataFrame, column: Any) -> pd.DataFrame: cols = [c for c in df.columns if str(c).lower() == column] df.rename(columns={cols[0]: column.lower()}, inplace=True) return df immediate_msg = None necessary_fn = None necessary_msg = None error_msg = None if necessary_column not in df.columns: if necessary_column in [str(c).lower() for c in df.columns]: def lower_case_column_creator(df: pd.DataFrame) -> pd.DataFrame: return lower_case_column(df, necessary_column) necessary_fn = lower_case_column_creator immediate_msg = f"\n- The `{necessary_column}` column/key should be lowercase" necessary_msg = f"Lower case column name to `{necessary_column}`" else: error_msg = f"`{necessary_column}` column/key is missing. 
Please make sure you name your columns/keys appropriately, then retry" return Remediation( name="necessary_column", immediate_msg=immediate_msg, necessary_msg=necessary_msg, necessary_fn=necessary_fn, error_msg=error_msg, ) def additional_column_validator(df: pd.DataFrame, fields: list[str] = ["prompt", "completion"]) -> Remediation: """ This validator will remove additional columns from the dataframe. """ additional_columns = [] necessary_msg = None immediate_msg = None necessary_fn = None # type: ignore if len(df.columns) > 2: additional_columns = [c for c in df.columns if c not in fields] warn_message = "" for ac in additional_columns: dups = [c for c in additional_columns if ac in c] if len(dups) > 0: warn_message += f"\n WARNING: Some of the additional columns/keys contain `{ac}` in their name. These will be ignored, and the column/key `{ac}` will be used instead. This could also result from a duplicate column/key in the provided file." immediate_msg = f"\n- The input file should contain exactly two columns/keys per row. Additional columns/keys present are: {additional_columns}{warn_message}" necessary_msg = f"Remove additional columns/keys: {additional_columns}" def necessary_fn(x: Any) -> Any: return x[fields] return Remediation( name="additional_column", immediate_msg=immediate_msg, necessary_msg=necessary_msg, necessary_fn=necessary_fn, ) def non_empty_field_validator(df: pd.DataFrame, field: str = "completion") -> Remediation: """ This validator will ensure that no completion is empty. """ necessary_msg = None necessary_fn = None # type: ignore immediate_msg = None if df[field].apply(lambda x: x == "").any() or df[field].isnull().any(): empty_rows = (df[field] == "") | (df[field].isnull()) empty_indexes = df.reset_index().index[empty_rows].tolist() immediate_msg = f"\n- `{field}` column/key should not contain empty strings. These are rows: {empty_indexes}" def necessary_fn(x: Any) -> Any: return x[x[field] != ""].dropna(subset=[field]) necessary_msg = f"Remove {len(empty_indexes)} rows with empty {field}s" return Remediation( name=f"empty_{field}", immediate_msg=immediate_msg, necessary_msg=necessary_msg, necessary_fn=necessary_fn, ) def duplicated_rows_validator(df: pd.DataFrame, fields: list[str] = ["prompt", "completion"]) -> Remediation: """ This validator will suggest to the user to remove duplicate rows if they exist. """ duplicated_rows = df.duplicated(subset=fields) duplicated_indexes = df.reset_index().index[duplicated_rows].tolist() immediate_msg = None optional_msg = None optional_fn = None # type: ignore if len(duplicated_indexes) > 0: immediate_msg = f"\n- There are {len(duplicated_indexes)} duplicated {'-'.join(fields)} sets. These are rows: {duplicated_indexes}" optional_msg = f"Remove {len(duplicated_indexes)} duplicate rows" def optional_fn(x: Any) -> Any: return x.drop_duplicates(subset=fields) return Remediation( name="duplicated_rows", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, ) def long_examples_validator(df: pd.DataFrame) -> Remediation: """ This validator will suggest to the user to remove examples that are too long. 
""" immediate_msg = None optional_msg = None optional_fn = None # type: ignore ft_type = infer_task_type(df) if ft_type != "open-ended generation": def get_long_indexes(d: pd.DataFrame) -> Any: long_examples = d.apply(lambda x: len(x.prompt) + len(x.completion) > 10000, axis=1) return d.reset_index().index[long_examples].tolist() long_indexes = get_long_indexes(df) if len(long_indexes) > 0: immediate_msg = f"\n- There are {len(long_indexes)} examples that are very long. These are rows: {long_indexes}\nFor conditional generation, and for classification the examples shouldn't be longer than 2048 tokens." optional_msg = f"Remove {len(long_indexes)} long examples" def optional_fn(x: Any) -> Any: long_indexes_to_drop = get_long_indexes(x) if long_indexes != long_indexes_to_drop: sys.stdout.write( f"The indices of the long examples has changed as a result of a previously applied recommendation.\nThe {len(long_indexes_to_drop)} long examples to be dropped are now at the following indices: {long_indexes_to_drop}\n" ) return x.drop(long_indexes_to_drop) return Remediation( name="long_examples", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, ) def common_prompt_suffix_validator(df: pd.DataFrame) -> Remediation: """ This validator will suggest to add a common suffix to the prompt if one doesn't already exist in case of classification or conditional generation. """ error_msg = None immediate_msg = None optional_msg = None optional_fn = None # type: ignore # Find a suffix which is not contained within the prompt otherwise suggested_suffix = "\n\n### =>\n\n" suffix_options = [ " ->", "\n\n###\n\n", "\n\n===\n\n", "\n\n---\n\n", "\n\n===>\n\n", "\n\n--->\n\n", ] for suffix_option in suffix_options: if suffix_option == " ->": if df.prompt.str.contains("\n").any(): continue if df.prompt.str.contains(suffix_option, regex=False).any(): continue suggested_suffix = suffix_option break display_suggested_suffix = suggested_suffix.replace("\n", "\\n") ft_type = infer_task_type(df) if ft_type == "open-ended generation": return Remediation(name="common_suffix") def add_suffix(x: Any, suffix: Any) -> Any: x["prompt"] += suffix return x common_suffix = get_common_xfix(df.prompt, xfix="suffix") if (df.prompt == common_suffix).all(): error_msg = f"All prompts are identical: `{common_suffix}`\nConsider leaving the prompts blank if you want to do open-ended generation, otherwise ensure prompts are different" return Remediation(name="common_suffix", error_msg=error_msg) if common_suffix != "": common_suffix_new_line_handled = common_suffix.replace("\n", "\\n") immediate_msg = f"\n- All prompts end with suffix `{common_suffix_new_line_handled}`" if len(common_suffix) > 10: immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`" if df.prompt.str[: -len(common_suffix)].str.contains(common_suffix, regex=False).any(): immediate_msg += f"\n WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix" else: immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. 
If you intend to do open-ended generation, then you should leave the prompts empty" if common_suffix == "": optional_msg = f"Add a suffix separator `{display_suggested_suffix}` to all prompts" def optional_fn(x: Any) -> Any: return add_suffix(x, suggested_suffix) return Remediation( name="common_completion_suffix", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, error_msg=error_msg, ) def common_prompt_prefix_validator(df: pd.DataFrame) -> Remediation: """ This validator will suggest to remove a common prefix from the prompt if a long one exist. """ MAX_PREFIX_LEN = 12 immediate_msg = None optional_msg = None optional_fn = None # type: ignore common_prefix = get_common_xfix(df.prompt, xfix="prefix") if common_prefix == "": return Remediation(name="common_prefix") def remove_common_prefix(x: Any, prefix: Any) -> Any: x["prompt"] = x["prompt"].str[len(prefix) :] return x if (df.prompt == common_prefix).all(): # already handled by common_suffix_validator return Remediation(name="common_prefix") if common_prefix != "": immediate_msg = f"\n- All prompts start with prefix `{common_prefix}`" if MAX_PREFIX_LEN < len(common_prefix): immediate_msg += ". Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. Most of the time you should only add the input data into the prompt, and the desired output into the completion" optional_msg = f"Remove prefix `{common_prefix}` from all prompts" def optional_fn(x: Any) -> Any: return remove_common_prefix(x, common_prefix) return Remediation( name="common_prompt_prefix", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, ) def common_completion_prefix_validator(df: pd.DataFrame) -> Remediation: """ This validator will suggest to remove a common prefix from the completion if a long one exist. """ MAX_PREFIX_LEN = 5 common_prefix = get_common_xfix(df.completion, xfix="prefix") ws_prefix = len(common_prefix) > 0 and common_prefix[0] == " " if len(common_prefix) < MAX_PREFIX_LEN: return Remediation(name="common_prefix") def remove_common_prefix(x: Any, prefix: Any, ws_prefix: Any) -> Any: x["completion"] = x["completion"].str[len(prefix) :] if ws_prefix: # keep the single whitespace as prefix x["completion"] = f" {x['completion']}" return x if (df.completion == common_prefix).all(): # already handled by common_suffix_validator return Remediation(name="common_prefix") immediate_msg = f"\n- All completions start with prefix `{common_prefix}`. Most of the time you should only add the output data into the completion, without any prefix" optional_msg = f"Remove prefix `{common_prefix}` from all completions" def optional_fn(x: Any) -> Any: return remove_common_prefix(x, common_prefix, ws_prefix) return Remediation( name="common_completion_prefix", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, ) def common_completion_suffix_validator(df: pd.DataFrame) -> Remediation: """ This validator will suggest to add a common suffix to the completion if one doesn't already exist in case of classification or conditional generation. 
""" error_msg = None immediate_msg = None optional_msg = None optional_fn = None # type: ignore ft_type = infer_task_type(df) if ft_type == "open-ended generation" or ft_type == "classification": return Remediation(name="common_suffix") common_suffix = get_common_xfix(df.completion, xfix="suffix") if (df.completion == common_suffix).all(): error_msg = f"All completions are identical: `{common_suffix}`\nEnsure completions are different, otherwise the model will just repeat `{common_suffix}`" return Remediation(name="common_suffix", error_msg=error_msg) # Find a suffix which is not contained within the completion otherwise suggested_suffix = " [END]" suffix_options = [ "\n", ".", " END", "***", "+++", "&&&", "$$$", "@@@", "%%%", ] for suffix_option in suffix_options: if df.completion.str.contains(suffix_option, regex=False).any(): continue suggested_suffix = suffix_option break display_suggested_suffix = suggested_suffix.replace("\n", "\\n") def add_suffix(x: Any, suffix: Any) -> Any: x["completion"] += suffix return x if common_suffix != "": common_suffix_new_line_handled = common_suffix.replace("\n", "\\n") immediate_msg = f"\n- All completions end with suffix `{common_suffix_new_line_handled}`" if len(common_suffix) > 10: immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`" if df.completion.str[: -len(common_suffix)].str.contains(common_suffix, regex=False).any(): immediate_msg += f"\n WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending" else: immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples." if common_suffix == "": optional_msg = f"Add a suffix ending `{display_suggested_suffix}` to all completions" def optional_fn(x: Any) -> Any: return add_suffix(x, suggested_suffix) return Remediation( name="common_completion_suffix", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, error_msg=error_msg, ) def completions_space_start_validator(df: pd.DataFrame) -> Remediation: """ This validator will suggest to add a space at the start of the completion if it doesn't already exist. This helps with tokenization. """ def add_space_start(x: Any) -> Any: x["completion"] = x["completion"].apply(lambda s: ("" if s.startswith(" ") else " ") + s) return x optional_msg = None optional_fn = None immediate_msg = None if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ": immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details" optional_msg = "Add a whitespace character to the beginning of the completion" optional_fn = add_space_start return Remediation( name="completion_space_start", immediate_msg=immediate_msg, optional_msg=optional_msg, optional_fn=optional_fn, ) def lower_case_validator(df: pd.DataFrame, column: Any) -> Remediation | None: """ This validator will suggest to lowercase the column values, if more than a third of letters are uppercase. 
""" def lower_case(x: Any) -> Any: x[column] = x[column].str.lower() return x count_upper = df[column].apply(lambda x: sum(1 for c in x if c.isalpha() and c.isupper())).sum() count_lower = df[column].apply(lambda x: sum(1 for c in x if c.isalpha() and c.islower())).sum() if count_upper * 2 > count_lower: return Remediation( name="lower_case", immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://platform.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details", optional_msg=f"Lowercase all your data in column/key `{column}`", optional_fn=lower_case, ) return None def read_any_format( fname: str, fields: list[str] = ["prompt", "completion"] ) -> tuple[pd.DataFrame | None, Remediation]: """ This function will read a file saved in .csv, .json, .txt, .xlsx or .tsv format using pandas. - for .xlsx it will read the first sheet - for .txt it will assume completions and split on newline """ remediation = None necessary_msg = None immediate_msg = None error_msg = None df = None if os.path.isfile(fname): try: if fname.lower().endswith(".csv") or fname.lower().endswith(".tsv"): file_extension_str, separator = ("CSV", ",") if fname.lower().endswith(".csv") else ("TSV", "\t") immediate_msg = ( f"\n- Based on your file extension, your file is formatted as a {file_extension_str} file" ) necessary_msg = f"Your format `{file_extension_str}` will be converted to `JSONL`" df = pd.read_csv(fname, sep=separator, dtype=str).fillna("") elif fname.lower().endswith(".xlsx"): immediate_msg = "\n- Based on your file extension, your file is formatted as an Excel file" necessary_msg = "Your format `XLSX` will be converted to `JSONL`" xls = pd.ExcelFile(fname) sheets = xls.sheet_names if len(sheets) > 1: immediate_msg += "\n- Your Excel file contains more than one sheet. Please either save as csv or ensure all data is present in the first sheet. WARNING: Reading only the first sheet..." df = pd.read_excel(fname, dtype=str).fillna("") elif fname.lower().endswith(".txt"): immediate_msg = "\n- Based on your file extension, you provided a text file" necessary_msg = "Your format `TXT` will be converted to `JSONL`" with open(fname, "r") as f: content = f.read() df = pd.DataFrame( [["", line] for line in content.split("\n")], columns=fields, dtype=str, ).fillna("") elif fname.lower().endswith(".jsonl"): df = pd.read_json(fname, lines=True, dtype=str).fillna("") # type: ignore if len(df) == 1: # type: ignore # this is NOT what we expect for a .jsonl file immediate_msg = "\n- Your JSONL file appears to be in a JSON format. Your file will be converted to JSONL format" necessary_msg = "Your format `JSON` will be converted to `JSONL`" df = pd.read_json(fname, dtype=str).fillna("") # type: ignore else: pass # this is what we expect for a .jsonl file elif fname.lower().endswith(".json"): try: # to handle case where .json file is actually a .jsonl file df = pd.read_json(fname, lines=True, dtype=str).fillna("") # type: ignore if len(df) == 1: # type: ignore # this code path corresponds to a .json file that has one line df = pd.read_json(fname, dtype=str).fillna("") # type: ignore else: # this is NOT what we expect for a .json file immediate_msg = "\n- Your JSON file appears to be in a JSONL format. 
Your file will be converted to JSONL format" necessary_msg = "Your format `JSON` will be converted to `JSONL`" except ValueError: # this code path corresponds to a .json file that has multiple lines (i.e. it is indented) df = pd.read_json(fname, dtype=str).fillna("") # type: ignore else: error_msg = ( "Your file must have one of the following extensions: .CSV, .TSV, .XLSX, .TXT, .JSON or .JSONL" ) if "." in fname: error_msg += f" Your file `{fname}` ends with the extension `.{fname.split('.')[-1]}` which is not supported." else: error_msg += f" Your file `{fname}` is missing a file extension." except (ValueError, TypeError): file_extension_str = fname.split(".")[-1].upper() error_msg = f"Your file `{fname}` does not appear to be in valid {file_extension_str} format. Please ensure your file is formatted as a valid {file_extension_str} file." else: error_msg = f"File {fname} does not exist." remediation = Remediation( name="read_any_format", necessary_msg=necessary_msg, immediate_msg=immediate_msg, error_msg=error_msg, ) return df, remediation def format_inferrer_validator(df: pd.DataFrame) -> Remediation: """ This validator will infer the likely fine-tuning format of the data, and display it to the user if it is classification. It will also suggest to use ada and explain train/validation split benefits. """ ft_type = infer_task_type(df) immediate_msg = None if ft_type == "classification": immediate_msg = f"\n- Based on your data it seems like you're trying to fine-tune a model for {ft_type}\n- For classification, we recommend you try one of the faster and cheaper models, such as `ada`\n- For classification, you can estimate the expected model performance by keeping a held out dataset, which is not used for training" return Remediation(name="num_examples", immediate_msg=immediate_msg) def apply_necessary_remediation(df: OptionalDataFrameT, remediation: Remediation) -> OptionalDataFrameT: """ This function will apply a necessary remediation to a dataframe, or print an error message if one exists. """ if remediation.error_msg is not None: sys.stderr.write(f"\n\nERROR in {remediation.name} validator: {remediation.error_msg}\n\nAborting...") sys.exit(1) if remediation.immediate_msg is not None: sys.stdout.write(remediation.immediate_msg) if remediation.necessary_fn is not None: df = remediation.necessary_fn(df) return df def accept_suggestion(input_text: str, auto_accept: bool) -> bool: sys.stdout.write(input_text) if auto_accept: sys.stdout.write("Y\n") return True return input().lower() != "n" def apply_optional_remediation( df: pd.DataFrame, remediation: Remediation, auto_accept: bool ) -> tuple[pd.DataFrame, bool]: """ This function will apply an optional remediation to a dataframe, based on the user input. 
""" optional_applied = False input_text = f"- [Recommended] {remediation.optional_msg} [Y/n]: " if remediation.optional_msg is not None: if accept_suggestion(input_text, auto_accept): assert remediation.optional_fn is not None df = remediation.optional_fn(df) optional_applied = True if remediation.necessary_msg is not None: sys.stdout.write(f"- [Necessary] {remediation.necessary_msg}\n") return df, optional_applied def estimate_fine_tuning_time(df: pd.DataFrame) -> None: """ Estimate the time it'll take to fine-tune the dataset """ ft_format = infer_task_type(df) expected_time = 1.0 if ft_format == "classification": num_examples = len(df) expected_time = num_examples * 1.44 else: size = df.memory_usage(index=True).sum() expected_time = size * 0.0515 def format_time(time: float) -> str: if time < 60: return f"{round(time, 2)} seconds" elif time < 3600: return f"{round(time / 60, 2)} minutes" elif time < 86400: return f"{round(time / 3600, 2)} hours" else: return f"{round(time / 86400, 2)} days" time_string = format_time(expected_time + 140) sys.stdout.write( f"Once your model starts training, it'll approximately take {time_string} to train a `curie` model, and less for `ada` and `babbage`. Queue will approximately take half an hour per job ahead of you.\n" ) def get_outfnames(fname: str, split: bool) -> list[str]: suffixes = ["_train", "_valid"] if split else [""] i = 0 while True: index_suffix = f" ({i})" if i > 0 else "" candidate_fnames = [f"{os.path.splitext(fname)[0]}_prepared{suffix}{index_suffix}.jsonl" for suffix in suffixes] if not any(os.path.isfile(f) for f in candidate_fnames): return candidate_fnames i += 1 def get_classification_hyperparams(df: pd.DataFrame) -> tuple[int, object]: n_classes = df.completion.nunique() pos_class = None if n_classes == 2: pos_class = df.completion.value_counts().index[0] return n_classes, pos_class def write_out_file(df: pd.DataFrame, fname: str, any_remediations: bool, auto_accept: bool) -> None: """ This function will write out a dataframe to a file, if the user would like to proceed, and also offer a fine-tuning command with the newly created file. For classification it will optionally ask the user if they would like to split the data into train/valid files, and modify the suggested command to include the valid set. """ ft_format = infer_task_type(df) common_prompt_suffix = get_common_xfix(df.prompt, xfix="suffix") common_completion_suffix = get_common_xfix(df.completion, xfix="suffix") split = False input_text = "- [Recommended] Would you like to split into training and validation set? [Y/n]: " if ft_format == "classification": if accept_suggestion(input_text, auto_accept): split = True additional_params = "" common_prompt_suffix_new_line_handled = common_prompt_suffix.replace("\n", "\\n") common_completion_suffix_new_line_handled = common_completion_suffix.replace("\n", "\\n") optional_ending_string = ( f' Make sure to include `stop=["{common_completion_suffix_new_line_handled}"]` so that the generated texts ends at the expected place.' if len(common_completion_suffix_new_line_handled) > 0 else "" ) input_text = "\n\nYour data will be written to a new JSONL file. 
Proceed [Y/n]: " if not any_remediations and not split: sys.stdout.write( f'\nYou can use your file for fine-tuning:\n> openai api fine_tunes.create -t "{fname}"{additional_params}\n\nAfter you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt.{optional_ending_string}\n' ) estimate_fine_tuning_time(df) elif accept_suggestion(input_text, auto_accept): fnames = get_outfnames(fname, split) if split: assert len(fnames) == 2 and "train" in fnames[0] and "valid" in fnames[1] MAX_VALID_EXAMPLES = 1000 n_train = max(len(df) - MAX_VALID_EXAMPLES, int(len(df) * 0.8)) df_train = df.sample(n=n_train, random_state=42) df_valid = df.drop(df_train.index) df_train[["prompt", "completion"]].to_json( # type: ignore fnames[0], lines=True, orient="records", force_ascii=False ) df_valid[["prompt", "completion"]].to_json(fnames[1], lines=True, orient="records", force_ascii=False) n_classes, pos_class = get_classification_hyperparams(df) additional_params += " --compute_classification_metrics" if n_classes == 2: additional_params += f' --classification_positive_class "{pos_class}"' else: additional_params += f" --classification_n_classes {n_classes}" else: assert len(fnames) == 1 df[["prompt", "completion"]].to_json(fnames[0], lines=True, orient="records", force_ascii=False) # Add -v VALID_FILE if we split the file into train / valid files_string = ("s" if split else "") + " to `" + ("` and `".join(fnames)) valid_string = f' -v "{fnames[1]}"' if split else "" separator_reminder = ( "" if len(common_prompt_suffix_new_line_handled) == 0 else f"After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt." ) sys.stdout.write( f'\nWrote modified file{files_string}`\nFeel free to take a look!\n\nNow use that file when fine-tuning:\n> openai api fine_tunes.create -t "{fnames[0]}"{valid_string}{additional_params}\n\n{separator_reminder}{optional_ending_string}\n' ) estimate_fine_tuning_time(df) else: sys.stdout.write("Aborting... 
did not write the file\n") def infer_task_type(df: pd.DataFrame) -> str: """ Infer the likely fine-tuning task type from the data """ CLASSIFICATION_THRESHOLD = 3 # min_average instances of each class if sum(df.prompt.str.len()) == 0: return "open-ended generation" if len(df.completion.unique()) < len(df) / CLASSIFICATION_THRESHOLD: return "classification" return "conditional generation" def get_common_xfix(series: Any, xfix: str = "suffix") -> str: """ Finds the longest common suffix or prefix of all the values in a series """ common_xfix = "" while True: common_xfixes = ( series.str[-(len(common_xfix) + 1) :] if xfix == "suffix" else series.str[: len(common_xfix) + 1] ) # first few or last few characters if common_xfixes.nunique() != 1: # we found the character at which we don't have a unique xfix anymore break elif common_xfix == common_xfixes.values[0]: # the entire first row is a prefix of every other row break else: # the first or last few characters are still common across all rows - let's try to add one more common_xfix = common_xfixes.values[0] return common_xfix Validator: TypeAlias = "Callable[[pd.DataFrame], Remediation | None]" def get_validators() -> list[Validator]: return [ num_examples_validator, lambda x: necessary_column_validator(x, "prompt"), lambda x: necessary_column_validator(x, "completion"), additional_column_validator, non_empty_field_validator, format_inferrer_validator, duplicated_rows_validator, long_examples_validator, lambda x: lower_case_validator(x, "prompt"), lambda x: lower_case_validator(x, "completion"), common_prompt_suffix_validator, common_prompt_prefix_validator, common_completion_prefix_validator, common_completion_suffix_validator, completions_space_start_validator, ] def apply_validators( df: pd.DataFrame, fname: str, remediation: Remediation | None, validators: list[Validator], auto_accept: bool, write_out_file_func: Callable[..., Any], ) -> None: optional_remediations: list[Remediation] = [] if remediation is not None: optional_remediations.append(remediation) for validator in validators: remediation = validator(df) if remediation is not None: optional_remediations.append(remediation) df = apply_necessary_remediation(df, remediation) any_optional_or_necessary_remediations = any( [ remediation for remediation in optional_remediations if remediation.optional_msg is not None or remediation.necessary_msg is not None ] ) any_necessary_applied = any( [remediation for remediation in optional_remediations if remediation.necessary_msg is not None] ) any_optional_applied = False if any_optional_or_necessary_remediations: sys.stdout.write("\n\nBased on the analysis we will perform the following actions:\n") for remediation in optional_remediations: df, optional_applied = apply_optional_remediation(df, remediation, auto_accept) any_optional_applied = any_optional_applied or optional_applied else: sys.stdout.write("\n\nNo remediations found.\n") any_optional_or_necessary_applied = any_optional_applied or any_necessary_applied write_out_file_func(df, fname, any_optional_or_necessary_applied, auto_accept) openai-python-1.12.0/src/openai/lib/azure.py000066400000000000000000000507251456126711000207110ustar00rootroot00000000000000from __future__ import annotations import os import inspect from typing import Any, Union, Mapping, TypeVar, Callable, Awaitable, overload from typing_extensions import Self, override import httpx from .._types import NOT_GIVEN, Omit, Timeout, NotGiven from .._utils import is_given, is_mapping from .._client import OpenAI, AsyncOpenAI 
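# --- Illustrative sketch (comments only; not part of the generated sources) ---
# How the data-preparation helpers above (`read_any_format`, `get_validators`,
# `apply_validators`, `write_out_file`) are typically chained together, e.g. by the
# `openai tools fine_tunes.prepare_data` CLI flow. The wiring shown here is an
# assumption based on the function signatures above, not code taken from this repo:
#
#     df, read_remediation = read_any_format(fname)
#     apply_validators(
#         df,
#         fname,
#         read_remediation,
#         validators=get_validators(),
#         auto_accept=False,  # or True to accept every recommendation without prompting
#         write_out_file_func=write_out_file,
#     )
#
# `apply_validators` applies every necessary remediation, prompts for the optional
# ones (auto-accepting them when `auto_accept` is True), and finally hands the cleaned
# dataframe to `write_out_file`, which writes the prepared JSONL file(s).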
from .._models import FinalRequestOptions from .._streaming import Stream, AsyncStream from .._exceptions import OpenAIError from .._base_client import DEFAULT_MAX_RETRIES, BaseClient _deployments_endpoints = set( [ "/completions", "/chat/completions", "/embeddings", "/audio/transcriptions", "/audio/translations", "/audio/speech", "/images/generations", ] ) AzureADTokenProvider = Callable[[], str] AsyncAzureADTokenProvider = Callable[[], "str | Awaitable[str]"] _HttpxClientT = TypeVar("_HttpxClientT", bound=Union[httpx.Client, httpx.AsyncClient]) _DefaultStreamT = TypeVar("_DefaultStreamT", bound=Union[Stream[Any], AsyncStream[Any]]) # we need to use a sentinel API key value for Azure AD # as we don't want to make the `api_key` in the main client Optional # and Azure AD tokens may be retrieved on a per-request basis API_KEY_SENTINEL = "".join(["<", "missing API key", ">"]) class MutuallyExclusiveAuthError(OpenAIError): def __init__(self) -> None: super().__init__( "The `api_key`, `azure_ad_token` and `azure_ad_token_provider` arguments are mutually exclusive; Only one can be passed at a time" ) class BaseAzureClient(BaseClient[_HttpxClientT, _DefaultStreamT]): @override def _build_request( self, options: FinalRequestOptions, ) -> httpx.Request: if options.url in _deployments_endpoints and is_mapping(options.json_data): model = options.json_data.get("model") if model is not None and not "/deployments" in str(self.base_url): options.url = f"/deployments/{model}{options.url}" return super()._build_request(options) class AzureOpenAI(BaseAzureClient[httpx.Client, Stream[Any]], OpenAI): @overload def __init__( self, *, azure_endpoint: str, azure_deployment: str | None = None, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.Client | None = None, _strict_response_validation: bool = False, ) -> None: ... @overload def __init__( self, *, azure_deployment: str | None = None, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.Client | None = None, _strict_response_validation: bool = False, ) -> None: ... @overload def __init__( self, *, base_url: str, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.Client | None = None, _strict_response_validation: bool = False, ) -> None: ... 
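# Illustrative usage sketch (comments only; not part of the generated client code).
# It shows constructing the synchronous Azure client declared in this class, based on
# the arguments documented in `__init__` below; the endpoint, key, API version and
# deployment name are placeholders, not real values:
#
#     client = AzureOpenAI(
#         azure_endpoint="https://example-resource.azure.openai.com/",
#         api_key=os.environ["AZURE_OPENAI_API_KEY"],
#         api_version="2023-12-01-preview",
#     )
#     completion = client.chat.completions.create(
#         # For Azure, `model` is the *deployment* name; `_build_request` above rewrites
#         # the URL to `/deployments/{model}/chat/completions` for deployment endpoints.
#         model="my-gpt-35-turbo-deployment",
#         messages=[{"role": "user", "content": "Hello!"}],
#     )
#     print(completion.choices[0].message.content)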
def __init__( self, *, api_version: str | None = None, azure_endpoint: str | None = None, azure_deployment: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, organization: str | None = None, base_url: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.Client | None = None, _strict_response_validation: bool = False, ) -> None: """Construct a new synchronous azure openai client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `AZURE_OPENAI_API_KEY` - `organization` from `OPENAI_ORG_ID` - `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN` - `api_version` from `OPENAI_API_VERSION` - `azure_endpoint` from `AZURE_OPENAI_ENDPOINT` Args: azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/` azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id azure_ad_token_provider: A function that returns an Azure Active Directory token, will be invoked on every request. azure_deployment: A model deployment, if given sets the base client URL to include `/deployments/{azure_deployment}`. Note: this means you won't be able to use non-deployment endpoints. """ if api_key is None: api_key = os.environ.get("AZURE_OPENAI_API_KEY") if azure_ad_token is None: azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN") if api_key is None and azure_ad_token is None and azure_ad_token_provider is None: raise OpenAIError( "Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables." 
) if api_version is None: api_version = os.environ.get("OPENAI_API_VERSION") if api_version is None: raise ValueError( "Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable" ) if default_query is None: default_query = {"api-version": api_version} else: default_query = {**default_query, "api-version": api_version} if base_url is None: if azure_endpoint is None: azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") if azure_endpoint is None: raise ValueError( "Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable" ) if azure_deployment is not None: base_url = f"{azure_endpoint}/openai/deployments/{azure_deployment}" else: base_url = f"{azure_endpoint}/openai" else: if azure_endpoint is not None: raise ValueError("base_url and azure_endpoint are mutually exclusive") if api_key is None: # define a sentinel value to avoid any typing issues api_key = API_KEY_SENTINEL super().__init__( api_key=api_key, organization=organization, base_url=base_url, timeout=timeout, max_retries=max_retries, default_headers=default_headers, default_query=default_query, http_client=http_client, _strict_response_validation=_strict_response_validation, ) self._api_version = api_version self._azure_ad_token = azure_ad_token self._azure_ad_token_provider = azure_ad_token_provider @override def copy( self, *, api_key: str | None = None, organization: str | None = None, api_version: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AzureADTokenProvider | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.Client | None = None, max_retries: int | NotGiven = NOT_GIVEN, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, set_default_query: Mapping[str, object] | None = None, _extra_kwargs: Mapping[str, Any] = {}, ) -> Self: """ Create a new client instance re-using the same options given to the current client with optional overriding. 
""" return super().copy( api_key=api_key, organization=organization, base_url=base_url, timeout=timeout, http_client=http_client, max_retries=max_retries, default_headers=default_headers, set_default_headers=set_default_headers, default_query=default_query, set_default_query=set_default_query, _extra_kwargs={ "api_version": api_version or self._api_version, "azure_ad_token": azure_ad_token or self._azure_ad_token, "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider, **_extra_kwargs, }, ) with_options = copy def _get_azure_ad_token(self) -> str | None: if self._azure_ad_token is not None: return self._azure_ad_token provider = self._azure_ad_token_provider if provider is not None: token = provider() if not token or not isinstance(token, str): # pyright: ignore[reportUnnecessaryIsInstance] raise ValueError( f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}", ) return token return None @override def _prepare_options(self, options: FinalRequestOptions) -> None: headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {} options.headers = headers azure_ad_token = self._get_azure_ad_token() if azure_ad_token is not None: if headers.get("Authorization") is None: headers["Authorization"] = f"Bearer {azure_ad_token}" elif self.api_key is not API_KEY_SENTINEL: if headers.get("api-key") is None: headers["api-key"] = self.api_key else: # should never be hit raise ValueError("Unable to handle auth") return super()._prepare_options(options) class AsyncAzureOpenAI(BaseAzureClient[httpx.AsyncClient, AsyncStream[Any]], AsyncOpenAI): @overload def __init__( self, *, azure_endpoint: str, azure_deployment: str | None = None, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.AsyncClient | None = None, _strict_response_validation: bool = False, ) -> None: ... @overload def __init__( self, *, azure_deployment: str | None = None, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.AsyncClient | None = None, _strict_response_validation: bool = False, ) -> None: ... @overload def __init__( self, *, base_url: str, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.AsyncClient | None = None, _strict_response_validation: bool = False, ) -> None: ... 
def __init__( self, *, azure_endpoint: str | None = None, azure_deployment: str | None = None, api_version: str | None = None, api_key: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, organization: str | None = None, base_url: str | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, max_retries: int = DEFAULT_MAX_RETRIES, default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, http_client: httpx.AsyncClient | None = None, _strict_response_validation: bool = False, ) -> None: """Construct a new asynchronous azure openai client instance. This automatically infers the following arguments from their corresponding environment variables if they are not provided: - `api_key` from `AZURE_OPENAI_API_KEY` - `organization` from `OPENAI_ORG_ID` - `azure_ad_token` from `AZURE_OPENAI_AD_TOKEN` - `api_version` from `OPENAI_API_VERSION` - `azure_endpoint` from `AZURE_OPENAI_ENDPOINT` Args: azure_endpoint: Your Azure endpoint, including the resource, e.g. `https://example-resource.azure.openai.com/` azure_ad_token: Your Azure Active Directory token, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id azure_ad_token_provider: A function that returns an Azure Active Directory token, will be invoked on every request. azure_deployment: A model deployment, if given sets the base client URL to include `/deployments/{azure_deployment}`. Note: this means you won't be able to use non-deployment endpoints. """ if api_key is None: api_key = os.environ.get("AZURE_OPENAI_API_KEY") if azure_ad_token is None: azure_ad_token = os.environ.get("AZURE_OPENAI_AD_TOKEN") if api_key is None and azure_ad_token is None and azure_ad_token_provider is None: raise OpenAIError( "Missing credentials. Please pass one of `api_key`, `azure_ad_token`, `azure_ad_token_provider`, or the `AZURE_OPENAI_API_KEY` or `AZURE_OPENAI_AD_TOKEN` environment variables." 
) if api_version is None: api_version = os.environ.get("OPENAI_API_VERSION") if api_version is None: raise ValueError( "Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable" ) if default_query is None: default_query = {"api-version": api_version} else: default_query = {**default_query, "api-version": api_version} if base_url is None: if azure_endpoint is None: azure_endpoint = os.environ.get("AZURE_OPENAI_ENDPOINT") if azure_endpoint is None: raise ValueError( "Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable" ) if azure_deployment is not None: base_url = f"{azure_endpoint}/openai/deployments/{azure_deployment}" else: base_url = f"{azure_endpoint}/openai" else: if azure_endpoint is not None: raise ValueError("base_url and azure_endpoint are mutually exclusive") if api_key is None: # define a sentinel value to avoid any typing issues api_key = API_KEY_SENTINEL super().__init__( api_key=api_key, organization=organization, base_url=base_url, timeout=timeout, max_retries=max_retries, default_headers=default_headers, default_query=default_query, http_client=http_client, _strict_response_validation=_strict_response_validation, ) self._api_version = api_version self._azure_ad_token = azure_ad_token self._azure_ad_token_provider = azure_ad_token_provider @override def copy( self, *, api_key: str | None = None, organization: str | None = None, api_version: str | None = None, azure_ad_token: str | None = None, azure_ad_token_provider: AsyncAzureADTokenProvider | None = None, base_url: str | httpx.URL | None = None, timeout: float | Timeout | None | NotGiven = NOT_GIVEN, http_client: httpx.AsyncClient | None = None, max_retries: int | NotGiven = NOT_GIVEN, default_headers: Mapping[str, str] | None = None, set_default_headers: Mapping[str, str] | None = None, default_query: Mapping[str, object] | None = None, set_default_query: Mapping[str, object] | None = None, _extra_kwargs: Mapping[str, Any] = {}, ) -> Self: """ Create a new client instance re-using the same options given to the current client with optional overriding. 
""" return super().copy( api_key=api_key, organization=organization, base_url=base_url, timeout=timeout, http_client=http_client, max_retries=max_retries, default_headers=default_headers, set_default_headers=set_default_headers, default_query=default_query, set_default_query=set_default_query, _extra_kwargs={ "api_version": api_version or self._api_version, "azure_ad_token": azure_ad_token or self._azure_ad_token, "azure_ad_token_provider": azure_ad_token_provider or self._azure_ad_token_provider, **_extra_kwargs, }, ) with_options = copy async def _get_azure_ad_token(self) -> str | None: if self._azure_ad_token is not None: return self._azure_ad_token provider = self._azure_ad_token_provider if provider is not None: token = provider() if inspect.isawaitable(token): token = await token if not token or not isinstance(token, str): raise ValueError( f"Expected `azure_ad_token_provider` argument to return a string but it returned {token}", ) return token return None @override async def _prepare_options(self, options: FinalRequestOptions) -> None: headers: dict[str, str | Omit] = {**options.headers} if is_given(options.headers) else {} options.headers = headers azure_ad_token = await self._get_azure_ad_token() if azure_ad_token is not None: if headers.get("Authorization") is None: headers["Authorization"] = f"Bearer {azure_ad_token}" elif self.api_key is not API_KEY_SENTINEL: if headers.get("api-key") is None: headers["api-key"] = self.api_key else: # should never be hit raise ValueError("Unable to handle auth") return await super()._prepare_options(options) openai-python-1.12.0/src/openai/pagination.py000066400000000000000000000052471456126711000211450ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Any, List, Generic, TypeVar, Optional, cast from typing_extensions import Protocol, override, runtime_checkable from ._base_client import BasePage, PageInfo, BaseSyncPage, BaseAsyncPage __all__ = ["SyncPage", "AsyncPage", "SyncCursorPage", "AsyncCursorPage"] _T = TypeVar("_T") @runtime_checkable class CursorPageItem(Protocol): id: Optional[str] class SyncPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): """Note: no pagination actually occurs yet, this is for forwards-compatibility.""" data: List[_T] object: str @override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data @override def next_page_info(self) -> None: """ This page represents a response that isn't actually paginated at the API level so there will never be a next page. """ return None class AsyncPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]): """Note: no pagination actually occurs yet, this is for forwards-compatibility.""" data: List[_T] object: str @override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data @override def next_page_info(self) -> None: """ This page represents a response that isn't actually paginated at the API level so there will never be a next page. 
""" return None class SyncCursorPage(BaseSyncPage[_T], BasePage[_T], Generic[_T]): data: List[_T] @override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data @override def next_page_info(self) -> Optional[PageInfo]: data = self.data if not data: return None item = cast(Any, data[-1]) if not isinstance(item, CursorPageItem) or item.id is None: # TODO emit warning log return None return PageInfo(params={"after": item.id}) class AsyncCursorPage(BaseAsyncPage[_T], BasePage[_T], Generic[_T]): data: List[_T] @override def _get_page_items(self) -> List[_T]: data = self.data if not data: return [] return data @override def next_page_info(self) -> Optional[PageInfo]: data = self.data if not data: return None item = cast(Any, data[-1]) if not isinstance(item, CursorPageItem) or item.id is None: # TODO emit warning log return None return PageInfo(params={"after": item.id}) openai-python-1.12.0/src/openai/py.typed000066400000000000000000000000001456126711000201170ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/000077500000000000000000000000001456126711000204445ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/__init__.py000066400000000000000000000072661456126711000225700ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .beta import ( Beta, AsyncBeta, BetaWithRawResponse, AsyncBetaWithRawResponse, BetaWithStreamingResponse, AsyncBetaWithStreamingResponse, ) from .chat import ( Chat, AsyncChat, ChatWithRawResponse, AsyncChatWithRawResponse, ChatWithStreamingResponse, AsyncChatWithStreamingResponse, ) from .audio import ( Audio, AsyncAudio, AudioWithRawResponse, AsyncAudioWithRawResponse, AudioWithStreamingResponse, AsyncAudioWithStreamingResponse, ) from .files import ( Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse, FilesWithStreamingResponse, AsyncFilesWithStreamingResponse, ) from .images import ( Images, AsyncImages, ImagesWithRawResponse, AsyncImagesWithRawResponse, ImagesWithStreamingResponse, AsyncImagesWithStreamingResponse, ) from .models import ( Models, AsyncModels, ModelsWithRawResponse, AsyncModelsWithRawResponse, ModelsWithStreamingResponse, AsyncModelsWithStreamingResponse, ) from .embeddings import ( Embeddings, AsyncEmbeddings, EmbeddingsWithRawResponse, AsyncEmbeddingsWithRawResponse, EmbeddingsWithStreamingResponse, AsyncEmbeddingsWithStreamingResponse, ) from .completions import ( Completions, AsyncCompletions, CompletionsWithRawResponse, AsyncCompletionsWithRawResponse, CompletionsWithStreamingResponse, AsyncCompletionsWithStreamingResponse, ) from .fine_tuning import ( FineTuning, AsyncFineTuning, FineTuningWithRawResponse, AsyncFineTuningWithRawResponse, FineTuningWithStreamingResponse, AsyncFineTuningWithStreamingResponse, ) from .moderations import ( Moderations, AsyncModerations, ModerationsWithRawResponse, AsyncModerationsWithRawResponse, ModerationsWithStreamingResponse, AsyncModerationsWithStreamingResponse, ) __all__ = [ "Completions", "AsyncCompletions", "CompletionsWithRawResponse", "AsyncCompletionsWithRawResponse", "CompletionsWithStreamingResponse", "AsyncCompletionsWithStreamingResponse", "Chat", "AsyncChat", "ChatWithRawResponse", "AsyncChatWithRawResponse", "ChatWithStreamingResponse", "AsyncChatWithStreamingResponse", "Embeddings", "AsyncEmbeddings", "EmbeddingsWithRawResponse", "AsyncEmbeddingsWithRawResponse", "EmbeddingsWithStreamingResponse", "AsyncEmbeddingsWithStreamingResponse", "Files", "AsyncFiles", 
"FilesWithRawResponse", "AsyncFilesWithRawResponse", "FilesWithStreamingResponse", "AsyncFilesWithStreamingResponse", "Images", "AsyncImages", "ImagesWithRawResponse", "AsyncImagesWithRawResponse", "ImagesWithStreamingResponse", "AsyncImagesWithStreamingResponse", "Audio", "AsyncAudio", "AudioWithRawResponse", "AsyncAudioWithRawResponse", "AudioWithStreamingResponse", "AsyncAudioWithStreamingResponse", "Moderations", "AsyncModerations", "ModerationsWithRawResponse", "AsyncModerationsWithRawResponse", "ModerationsWithStreamingResponse", "AsyncModerationsWithStreamingResponse", "Models", "AsyncModels", "ModelsWithRawResponse", "AsyncModelsWithRawResponse", "ModelsWithStreamingResponse", "AsyncModelsWithStreamingResponse", "FineTuning", "AsyncFineTuning", "FineTuningWithRawResponse", "AsyncFineTuningWithRawResponse", "FineTuningWithStreamingResponse", "AsyncFineTuningWithStreamingResponse", "Beta", "AsyncBeta", "BetaWithRawResponse", "AsyncBetaWithRawResponse", "BetaWithStreamingResponse", "AsyncBetaWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/audio/000077500000000000000000000000001456126711000215455ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/audio/__init__.py000066400000000000000000000031661456126711000236640ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .audio import ( Audio, AsyncAudio, AudioWithRawResponse, AsyncAudioWithRawResponse, AudioWithStreamingResponse, AsyncAudioWithStreamingResponse, ) from .speech import ( Speech, AsyncSpeech, SpeechWithRawResponse, AsyncSpeechWithRawResponse, SpeechWithStreamingResponse, AsyncSpeechWithStreamingResponse, ) from .translations import ( Translations, AsyncTranslations, TranslationsWithRawResponse, AsyncTranslationsWithRawResponse, TranslationsWithStreamingResponse, AsyncTranslationsWithStreamingResponse, ) from .transcriptions import ( Transcriptions, AsyncTranscriptions, TranscriptionsWithRawResponse, AsyncTranscriptionsWithRawResponse, TranscriptionsWithStreamingResponse, AsyncTranscriptionsWithStreamingResponse, ) __all__ = [ "Transcriptions", "AsyncTranscriptions", "TranscriptionsWithRawResponse", "AsyncTranscriptionsWithRawResponse", "TranscriptionsWithStreamingResponse", "AsyncTranscriptionsWithStreamingResponse", "Translations", "AsyncTranslations", "TranslationsWithRawResponse", "AsyncTranslationsWithRawResponse", "TranslationsWithStreamingResponse", "AsyncTranslationsWithStreamingResponse", "Speech", "AsyncSpeech", "SpeechWithRawResponse", "AsyncSpeechWithRawResponse", "SpeechWithStreamingResponse", "AsyncSpeechWithStreamingResponse", "Audio", "AsyncAudio", "AudioWithRawResponse", "AsyncAudioWithRawResponse", "AudioWithStreamingResponse", "AsyncAudioWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/audio/audio.py000066400000000000000000000105401456126711000232200ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations from .speech import ( Speech, AsyncSpeech, SpeechWithRawResponse, AsyncSpeechWithRawResponse, SpeechWithStreamingResponse, AsyncSpeechWithStreamingResponse, ) from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from .translations import ( Translations, AsyncTranslations, TranslationsWithRawResponse, AsyncTranslationsWithRawResponse, TranslationsWithStreamingResponse, AsyncTranslationsWithStreamingResponse, ) from .transcriptions import ( Transcriptions, AsyncTranscriptions, TranscriptionsWithRawResponse, AsyncTranscriptionsWithRawResponse, TranscriptionsWithStreamingResponse, AsyncTranscriptionsWithStreamingResponse, ) __all__ = ["Audio", "AsyncAudio"] class Audio(SyncAPIResource): @cached_property def transcriptions(self) -> Transcriptions: return Transcriptions(self._client) @cached_property def translations(self) -> Translations: return Translations(self._client) @cached_property def speech(self) -> Speech: return Speech(self._client) @cached_property def with_raw_response(self) -> AudioWithRawResponse: return AudioWithRawResponse(self) @cached_property def with_streaming_response(self) -> AudioWithStreamingResponse: return AudioWithStreamingResponse(self) class AsyncAudio(AsyncAPIResource): @cached_property def transcriptions(self) -> AsyncTranscriptions: return AsyncTranscriptions(self._client) @cached_property def translations(self) -> AsyncTranslations: return AsyncTranslations(self._client) @cached_property def speech(self) -> AsyncSpeech: return AsyncSpeech(self._client) @cached_property def with_raw_response(self) -> AsyncAudioWithRawResponse: return AsyncAudioWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncAudioWithStreamingResponse: return AsyncAudioWithStreamingResponse(self) class AudioWithRawResponse: def __init__(self, audio: Audio) -> None: self._audio = audio @cached_property def transcriptions(self) -> TranscriptionsWithRawResponse: return TranscriptionsWithRawResponse(self._audio.transcriptions) @cached_property def translations(self) -> TranslationsWithRawResponse: return TranslationsWithRawResponse(self._audio.translations) @cached_property def speech(self) -> SpeechWithRawResponse: return SpeechWithRawResponse(self._audio.speech) class AsyncAudioWithRawResponse: def __init__(self, audio: AsyncAudio) -> None: self._audio = audio @cached_property def transcriptions(self) -> AsyncTranscriptionsWithRawResponse: return AsyncTranscriptionsWithRawResponse(self._audio.transcriptions) @cached_property def translations(self) -> AsyncTranslationsWithRawResponse: return AsyncTranslationsWithRawResponse(self._audio.translations) @cached_property def speech(self) -> AsyncSpeechWithRawResponse: return AsyncSpeechWithRawResponse(self._audio.speech) class AudioWithStreamingResponse: def __init__(self, audio: Audio) -> None: self._audio = audio @cached_property def transcriptions(self) -> TranscriptionsWithStreamingResponse: return TranscriptionsWithStreamingResponse(self._audio.transcriptions) @cached_property def translations(self) -> TranslationsWithStreamingResponse: return TranslationsWithStreamingResponse(self._audio.translations) @cached_property def speech(self) -> SpeechWithStreamingResponse: return SpeechWithStreamingResponse(self._audio.speech) class AsyncAudioWithStreamingResponse: def __init__(self, audio: AsyncAudio) -> None: self._audio = audio @cached_property def transcriptions(self) -> AsyncTranscriptionsWithStreamingResponse: return 
AsyncTranscriptionsWithStreamingResponse(self._audio.transcriptions) @cached_property def translations(self) -> AsyncTranslationsWithStreamingResponse: return AsyncTranslationsWithStreamingResponse(self._audio.translations) @cached_property def speech(self) -> AsyncSpeechWithStreamingResponse: return AsyncSpeechWithStreamingResponse(self._audio.speech) openai-python-1.12.0/src/openai/resources/audio/speech.py000066400000000000000000000165571456126711000234040ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union from typing_extensions import Literal import httpx from ... import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import ( StreamedBinaryAPIResponse, AsyncStreamedBinaryAPIResponse, to_custom_streamed_response_wrapper, async_to_custom_streamed_response_wrapper, ) from ...types.audio import speech_create_params from ..._base_client import ( make_request_options, ) __all__ = ["Speech", "AsyncSpeech"] class Speech(SyncAPIResource): @cached_property def with_raw_response(self) -> SpeechWithRawResponse: return SpeechWithRawResponse(self) @cached_property def with_streaming_response(self) -> SpeechWithStreamingResponse: return SpeechWithStreamingResponse(self) def create( self, *, input: str, model: Union[str, Literal["tts-1", "tts-1-hd"]], voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> _legacy_response.HttpxBinaryResponseContent: """ Generates audio from the input text. Args: input: The text to generate audio for. The maximum length is 4096 characters. model: One of the available [TTS models](https://platform.openai.com/docs/models/tts): `tts-1` or `tts-1-hd` voice: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/audio/speech", body=maybe_transform( { "input": input, "model": model, "voice": voice, "response_format": response_format, "speed": speed, }, speech_create_params.SpeechCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=_legacy_response.HttpxBinaryResponseContent, ) class AsyncSpeech(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncSpeechWithRawResponse: return AsyncSpeechWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncSpeechWithStreamingResponse: return AsyncSpeechWithStreamingResponse(self) async def create( self, *, input: str, model: Union[str, Literal["tts-1", "tts-1-hd"]], voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], response_format: Literal["mp3", "opus", "aac", "flac"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> _legacy_response.HttpxBinaryResponseContent: """ Generates audio from the input text. Args: input: The text to generate audio for. The maximum length is 4096 characters. model: One of the available [TTS models](https://platform.openai.com/docs/models/tts): `tts-1` or `tts-1-hd` voice: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). response_format: The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( "/audio/speech", body=maybe_transform( { "input": input, "model": model, "voice": voice, "response_format": response_format, "speed": speed, }, speech_create_params.SpeechCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=_legacy_response.HttpxBinaryResponseContent, ) class SpeechWithRawResponse: def __init__(self, speech: Speech) -> None: self._speech = speech self.create = _legacy_response.to_raw_response_wrapper( speech.create, ) class AsyncSpeechWithRawResponse: def __init__(self, speech: AsyncSpeech) -> None: self._speech = speech self.create = _legacy_response.async_to_raw_response_wrapper( speech.create, ) class SpeechWithStreamingResponse: def __init__(self, speech: Speech) -> None: self._speech = speech self.create = to_custom_streamed_response_wrapper( speech.create, StreamedBinaryAPIResponse, ) class AsyncSpeechWithStreamingResponse: def __init__(self, speech: AsyncSpeech) -> None: self._speech = speech self.create = async_to_custom_streamed_response_wrapper( speech.create, AsyncStreamedBinaryAPIResponse, ) openai-python-1.12.0/src/openai/resources/audio/transcriptions.py000066400000000000000000000245511456126711000252100ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union, Mapping, cast from typing_extensions import Literal import httpx from ... import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import extract_files, maybe_transform, deepcopy_minimal from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...types.audio import Transcription, transcription_create_params from ..._base_client import ( make_request_options, ) __all__ = ["Transcriptions", "AsyncTranscriptions"] class Transcriptions(SyncAPIResource): @cached_property def with_raw_response(self) -> TranscriptionsWithRawResponse: return TranscriptionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> TranscriptionsWithStreamingResponse: return TranscriptionsWithStreamingResponse(self) def create( self, *, file: FileTypes, model: Union[str, Literal["whisper-1"]], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Transcription: """ Transcribes audio into the input language. Args: file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. 
model: ID of the model to use. Only `whisper-1` is currently available. language: The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. response_format: The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these options: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "file": file, "model": model, "language": language, "prompt": prompt, "response_format": response_format, "temperature": temperature, "timestamp_granularities": timestamp_granularities, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. # multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/audio/transcriptions", body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Transcription, ) class AsyncTranscriptions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranscriptionsWithRawResponse: return AsyncTranscriptionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse: return AsyncTranscriptionsWithStreamingResponse(self) async def create( self, *, file: FileTypes, model: Union[str, Literal["whisper-1"]], language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Transcription: """ Transcribes audio into the input language. 
Args: file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. Only `whisper-1` is currently available. language: The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. response_format: The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. timestamp_granularities: The timestamp granularities to populate for this transcription. Any of these options: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "file": file, "model": model, "language": language, "prompt": prompt, "response_format": response_format, "temperature": temperature, "timestamp_granularities": timestamp_granularities, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. # multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/audio/transcriptions", body=maybe_transform(body, transcription_create_params.TranscriptionCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Transcription, ) class TranscriptionsWithRawResponse: def __init__(self, transcriptions: Transcriptions) -> None: self._transcriptions = transcriptions self.create = _legacy_response.to_raw_response_wrapper( transcriptions.create, ) class AsyncTranscriptionsWithRawResponse: def __init__(self, transcriptions: AsyncTranscriptions) -> None: self._transcriptions = transcriptions self.create = _legacy_response.async_to_raw_response_wrapper( transcriptions.create, ) class TranscriptionsWithStreamingResponse: def __init__(self, transcriptions: Transcriptions) -> None: self._transcriptions = transcriptions self.create = to_streamed_response_wrapper( transcriptions.create, ) class AsyncTranscriptionsWithStreamingResponse: def __init__(self, transcriptions: AsyncTranscriptions) -> None: self._transcriptions = transcriptions self.create = async_to_streamed_response_wrapper( transcriptions.create, ) openai-python-1.12.0/src/openai/resources/audio/translations.py000066400000000000000000000211451456126711000246430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
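# Illustrative usage sketch (comments only; not part of this generated module): calling
# the Transcriptions resource defined in `transcriptions.py` above from a client
# instance. The file name is a placeholder and the API key is assumed to come from the
# OPENAI_API_KEY environment variable:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     with open("speech.mp3", "rb") as audio_file:
#         transcript = client.audio.transcriptions.create(
#             model="whisper-1",
#             file=audio_file,
#         )
#     print(transcript.text)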
from __future__ import annotations from typing import Union, Mapping, cast from typing_extensions import Literal import httpx from ... import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from ..._utils import extract_files, maybe_transform, deepcopy_minimal from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...types.audio import Translation, translation_create_params from ..._base_client import ( make_request_options, ) __all__ = ["Translations", "AsyncTranslations"] class Translations(SyncAPIResource): @cached_property def with_raw_response(self) -> TranslationsWithRawResponse: return TranslationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> TranslationsWithStreamingResponse: return TranslationsWithStreamingResponse(self) def create( self, *, file: FileTypes, model: Union[str, Literal["whisper-1"]], prompt: str | NotGiven = NOT_GIVEN, response_format: str | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Translation: """ Translates audio into English. Args: file: The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. Only `whisper-1` is currently available. prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. response_format: The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "file": file, "model": model, "prompt": prompt, "response_format": response_format, "temperature": temperature, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/audio/translations", body=maybe_transform(body, translation_create_params.TranslationCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Translation, ) class AsyncTranslations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncTranslationsWithRawResponse: return AsyncTranslationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse: return AsyncTranslationsWithStreamingResponse(self) async def create( self, *, file: FileTypes, model: Union[str, Literal["whisper-1"]], prompt: str | NotGiven = NOT_GIVEN, response_format: str | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Translation: """ Translates audio into English. Args: file: The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. model: ID of the model to use. Only `whisper-1` is currently available. prompt: An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. response_format: The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "file": file, "model": model, "prompt": prompt, "response_format": response_format, "temperature": temperature, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/audio/translations", body=maybe_transform(body, translation_create_params.TranslationCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Translation, ) class TranslationsWithRawResponse: def __init__(self, translations: Translations) -> None: self._translations = translations self.create = _legacy_response.to_raw_response_wrapper( translations.create, ) class AsyncTranslationsWithRawResponse: def __init__(self, translations: AsyncTranslations) -> None: self._translations = translations self.create = _legacy_response.async_to_raw_response_wrapper( translations.create, ) class TranslationsWithStreamingResponse: def __init__(self, translations: Translations) -> None: self._translations = translations self.create = to_streamed_response_wrapper( translations.create, ) class AsyncTranslationsWithStreamingResponse: def __init__(self, translations: AsyncTranslations) -> None: self._translations = translations self.create = async_to_streamed_response_wrapper( translations.create, ) openai-python-1.12.0/src/openai/resources/beta/000077500000000000000000000000001456126711000213575ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/beta/__init__.py000066400000000000000000000022221456126711000234660ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .beta import ( Beta, AsyncBeta, BetaWithRawResponse, AsyncBetaWithRawResponse, BetaWithStreamingResponse, AsyncBetaWithStreamingResponse, ) from .threads import ( Threads, AsyncThreads, ThreadsWithRawResponse, AsyncThreadsWithRawResponse, ThreadsWithStreamingResponse, AsyncThreadsWithStreamingResponse, ) from .assistants import ( Assistants, AsyncAssistants, AssistantsWithRawResponse, AsyncAssistantsWithRawResponse, AssistantsWithStreamingResponse, AsyncAssistantsWithStreamingResponse, ) __all__ = [ "Assistants", "AsyncAssistants", "AssistantsWithRawResponse", "AsyncAssistantsWithRawResponse", "AssistantsWithStreamingResponse", "AsyncAssistantsWithStreamingResponse", "Threads", "AsyncThreads", "ThreadsWithRawResponse", "AsyncThreadsWithRawResponse", "ThreadsWithStreamingResponse", "AsyncThreadsWithStreamingResponse", "Beta", "AsyncBeta", "BetaWithRawResponse", "AsyncBetaWithRawResponse", "BetaWithStreamingResponse", "AsyncBetaWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/beta/assistants/000077500000000000000000000000001456126711000235535ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/beta/assistants/__init__.py000066400000000000000000000014601456126711000256650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
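# A minimal usage sketch for the Assistants resource re-exported from this
# package, which is normally reached through `client.beta.assistants`. The
# model name, assistant name, and instructions below are illustrative
# placeholders, and an `OPENAI_API_KEY` environment variable is assumed to be
# set; see `assistants.py` below for the full parameter lists.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     # Create an assistant with a model and instructions.
#     assistant = client.beta.assistants.create(
#         model="gpt-4",
#         name="data-helper",
#         instructions="You answer questions about uploaded files.",
#         tools=[{"type": "code_interpreter"}],
#     )
#
#     # Iterate over existing assistants; the returned page object handles
#     # cursor-based pagination transparently.
#     for existing in client.beta.assistants.list(limit=20):
#         print(existing.id, existing.name)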
from .files import ( Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse, FilesWithStreamingResponse, AsyncFilesWithStreamingResponse, ) from .assistants import ( Assistants, AsyncAssistants, AssistantsWithRawResponse, AsyncAssistantsWithRawResponse, AssistantsWithStreamingResponse, AsyncAssistantsWithStreamingResponse, ) __all__ = [ "Files", "AsyncFiles", "FilesWithRawResponse", "AsyncFilesWithRawResponse", "FilesWithStreamingResponse", "AsyncFilesWithStreamingResponse", "Assistants", "AsyncAssistants", "AssistantsWithRawResponse", "AsyncAssistantsWithRawResponse", "AssistantsWithStreamingResponse", "AsyncAssistantsWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/beta/assistants/assistants.py000066400000000000000000000736431456126711000263360ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Iterable, Optional from typing_extensions import Literal import httpx from .... import _legacy_response from .files import ( Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse, FilesWithStreamingResponse, AsyncFilesWithStreamingResponse, ) from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ...._utils import maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage from ....types.beta import ( Assistant, AssistantDeleted, assistant_list_params, assistant_create_params, assistant_update_params, ) from ...._base_client import ( AsyncPaginator, make_request_options, ) __all__ = ["Assistants", "AsyncAssistants"] class Assistants(SyncAPIResource): @cached_property def files(self) -> Files: return Files(self._client) @cached_property def with_raw_response(self) -> AssistantsWithRawResponse: return AssistantsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AssistantsWithStreamingResponse: return AssistantsWithStreamingResponse(self) def create( self, *, model: str, description: Optional[str] | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Assistant: """ Create an assistant with a model and instructions. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. description: The description of the assistant. The maximum length is 512 characters. file_ids: A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. 
instructions: The system instructions that the assistant uses. The maximum length is 32768 characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. name: The name of the assistant. The maximum length is 256 characters. tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( "/assistants", body=maybe_transform( { "model": model, "description": description, "file_ids": file_ids, "instructions": instructions, "metadata": metadata, "name": name, "tools": tools, }, assistant_create_params.AssistantCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Assistant, ) def retrieve( self, assistant_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Assistant: """ Retrieves an assistant. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/assistants/{assistant_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Assistant, ) def update( self, assistant_id: str, *, description: Optional[str] | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Assistant: """Modifies an assistant. Args: description: The description of the assistant. The maximum length is 512 characters. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. 
Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. instructions: The system instructions that the assistant uses. The maximum length is 32768 characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. name: The name of the assistant. The maximum length is 256 characters. tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/assistants/{assistant_id}", body=maybe_transform( { "description": description, "file_ids": file_ids, "instructions": instructions, "metadata": metadata, "model": model, "name": name, "tools": tools, }, assistant_update_params.AssistantUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Assistant, ) def list( self, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[Assistant]: """Returns a list of assistants. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( "/assistants", page=SyncCursorPage[Assistant], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, assistant_list_params.AssistantListParams, ), ), model=Assistant, ) def delete( self, assistant_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AssistantDeleted: """ Delete an assistant. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._delete( f"/assistants/{assistant_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=AssistantDeleted, ) class AsyncAssistants(AsyncAPIResource): @cached_property def files(self) -> AsyncFiles: return AsyncFiles(self._client) @cached_property def with_raw_response(self) -> AsyncAssistantsWithRawResponse: return AsyncAssistantsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncAssistantsWithStreamingResponse: return AsyncAssistantsWithStreamingResponse(self) async def create( self, *, model: str, description: Optional[str] | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, tools: Iterable[assistant_create_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Assistant: """ Create an assistant with a model and instructions. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. description: The description of the assistant. The maximum length is 512 characters. file_ids: A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. 
Files are ordered by their creation date in ascending order. instructions: The system instructions that the assistant uses. The maximum length is 32768 characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. name: The name of the assistant. The maximum length is 256 characters. tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( "/assistants", body=maybe_transform( { "model": model, "description": description, "file_ids": file_ids, "instructions": instructions, "metadata": metadata, "name": name, "tools": tools, }, assistant_create_params.AssistantCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Assistant, ) async def retrieve( self, assistant_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Assistant: """ Retrieves an assistant. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/assistants/{assistant_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Assistant, ) async def update( self, assistant_id: str, *, description: Optional[str] | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: str | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, tools: Iterable[assistant_update_params.Tool] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Assistant: """Modifies an assistant. Args: description: The description of the assistant. The maximum length is 512 characters. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. 
There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. instructions: The system instructions that the assistant uses. The maximum length is 32768 characters. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. name: The name of the assistant. The maximum length is 256 characters. tools: A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/assistants/{assistant_id}", body=maybe_transform( { "description": description, "file_ids": file_ids, "instructions": instructions, "metadata": metadata, "model": model, "name": name, "tools": tools, }, assistant_update_params.AssistantUpdateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Assistant, ) def list( self, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[Assistant, AsyncCursorPage[Assistant]]: """Returns a list of assistants. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( "/assistants", page=AsyncCursorPage[Assistant], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, assistant_list_params.AssistantListParams, ), ), model=Assistant, ) async def delete( self, assistant_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AssistantDeleted: """ Delete an assistant. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._delete( f"/assistants/{assistant_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=AssistantDeleted, ) class AssistantsWithRawResponse: def __init__(self, assistants: Assistants) -> None: self._assistants = assistants self.create = _legacy_response.to_raw_response_wrapper( assistants.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( assistants.retrieve, ) self.update = _legacy_response.to_raw_response_wrapper( assistants.update, ) self.list = _legacy_response.to_raw_response_wrapper( assistants.list, ) self.delete = _legacy_response.to_raw_response_wrapper( assistants.delete, ) @cached_property def files(self) -> FilesWithRawResponse: return FilesWithRawResponse(self._assistants.files) class AsyncAssistantsWithRawResponse: def __init__(self, assistants: AsyncAssistants) -> None: self._assistants = assistants self.create = _legacy_response.async_to_raw_response_wrapper( assistants.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( assistants.retrieve, ) self.update = _legacy_response.async_to_raw_response_wrapper( assistants.update, ) self.list = _legacy_response.async_to_raw_response_wrapper( assistants.list, ) self.delete = _legacy_response.async_to_raw_response_wrapper( assistants.delete, ) @cached_property def files(self) -> AsyncFilesWithRawResponse: return AsyncFilesWithRawResponse(self._assistants.files) class AssistantsWithStreamingResponse: def __init__(self, assistants: Assistants) -> None: self._assistants = assistants self.create = to_streamed_response_wrapper( assistants.create, ) self.retrieve = to_streamed_response_wrapper( assistants.retrieve, ) self.update = to_streamed_response_wrapper( assistants.update, ) self.list = to_streamed_response_wrapper( assistants.list, ) self.delete = to_streamed_response_wrapper( assistants.delete, ) @cached_property def files(self) -> 
FilesWithStreamingResponse: return FilesWithStreamingResponse(self._assistants.files) class AsyncAssistantsWithStreamingResponse: def __init__(self, assistants: AsyncAssistants) -> None: self._assistants = assistants self.create = async_to_streamed_response_wrapper( assistants.create, ) self.retrieve = async_to_streamed_response_wrapper( assistants.retrieve, ) self.update = async_to_streamed_response_wrapper( assistants.update, ) self.list = async_to_streamed_response_wrapper( assistants.list, ) self.delete = async_to_streamed_response_wrapper( assistants.delete, ) @cached_property def files(self) -> AsyncFilesWithStreamingResponse: return AsyncFilesWithStreamingResponse(self._assistants.files) openai-python-1.12.0/src/openai/resources/beta/assistants/files.py000066400000000000000000000457501456126711000252420ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal import httpx from .... import _legacy_response from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ...._utils import maybe_transform from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....pagination import SyncCursorPage, AsyncCursorPage from ...._base_client import ( AsyncPaginator, make_request_options, ) from ....types.beta.assistants import AssistantFile, FileDeleteResponse, file_list_params, file_create_params __all__ = ["Files", "AsyncFiles"] class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: return FilesWithStreamingResponse(self) def create( self, assistant_id: str, *, file_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AssistantFile: """ Create an assistant file by attaching a [File](https://platform.openai.com/docs/api-reference/files) to an [assistant](https://platform.openai.com/docs/api-reference/assistants). Args: file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access files. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/assistants/{assistant_id}/files", body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=AssistantFile, ) def retrieve( self, file_id: str, *, assistant_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AssistantFile: """ Retrieves an AssistantFile. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/assistants/{assistant_id}/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=AssistantFile, ) def list( self, assistant_id: str, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[AssistantFile]: """ Returns a list of assistant files. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/assistants/{assistant_id}/files", page=SyncCursorPage[AssistantFile], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, file_list_params.FileListParams, ), ), model=AssistantFile, ) def delete( self, file_id: str, *, assistant_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileDeleteResponse: """ Delete an assistant file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._delete( f"/assistants/{assistant_id}/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileDeleteResponse, ) class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: return AsyncFilesWithStreamingResponse(self) async def create( self, assistant_id: str, *, file_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AssistantFile: """ Create an assistant file by attaching a [File](https://platform.openai.com/docs/api-reference/files) to an [assistant](https://platform.openai.com/docs/api-reference/assistants). Args: file_id: A [File](https://platform.openai.com/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access files. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/assistants/{assistant_id}/files", body=maybe_transform({"file_id": file_id}, file_create_params.FileCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=AssistantFile, ) async def retrieve( self, file_id: str, *, assistant_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AssistantFile: """ Retrieves an AssistantFile. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/assistants/{assistant_id}/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=AssistantFile, ) def list( self, assistant_id: str, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[AssistantFile, AsyncCursorPage[AssistantFile]]: """ Returns a list of assistant files. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/assistants/{assistant_id}/files", page=AsyncCursorPage[AssistantFile], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, file_list_params.FileListParams, ), ), model=AssistantFile, ) async def delete( self, file_id: str, *, assistant_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileDeleteResponse: """ Delete an assistant file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not assistant_id: raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}") if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._delete( f"/assistants/{assistant_id}/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileDeleteResponse, ) class FilesWithRawResponse: def __init__(self, files: Files) -> None: self._files = files self.create = _legacy_response.to_raw_response_wrapper( files.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( files.retrieve, ) self.list = _legacy_response.to_raw_response_wrapper( files.list, ) self.delete = _legacy_response.to_raw_response_wrapper( files.delete, ) class AsyncFilesWithRawResponse: def __init__(self, files: AsyncFiles) -> None: self._files = files self.create = _legacy_response.async_to_raw_response_wrapper( files.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( files.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( files.list, ) self.delete = _legacy_response.async_to_raw_response_wrapper( files.delete, ) class FilesWithStreamingResponse: def __init__(self, files: Files) -> None: self._files = files self.create = to_streamed_response_wrapper( files.create, ) self.retrieve = to_streamed_response_wrapper( files.retrieve, ) self.list = to_streamed_response_wrapper( files.list, ) self.delete = to_streamed_response_wrapper( files.delete, ) class AsyncFilesWithStreamingResponse: def __init__(self, files: AsyncFiles) -> None: self._files = files self.create = async_to_streamed_response_wrapper( files.create, ) self.retrieve = async_to_streamed_response_wrapper( files.retrieve, ) self.list = async_to_streamed_response_wrapper( files.list, ) self.delete = 
async_to_streamed_response_wrapper( files.delete, ) openai-python-1.12.0/src/openai/resources/beta/beta.py000066400000000000000000000064141456126711000226510ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .threads import ( Threads, AsyncThreads, ThreadsWithRawResponse, AsyncThreadsWithRawResponse, ThreadsWithStreamingResponse, AsyncThreadsWithStreamingResponse, ) from ..._compat import cached_property from .assistants import ( Assistants, AsyncAssistants, AssistantsWithRawResponse, AsyncAssistantsWithRawResponse, AssistantsWithStreamingResponse, AsyncAssistantsWithStreamingResponse, ) from ..._resource import SyncAPIResource, AsyncAPIResource from .threads.threads import Threads, AsyncThreads from .assistants.assistants import Assistants, AsyncAssistants __all__ = ["Beta", "AsyncBeta"] class Beta(SyncAPIResource): @cached_property def assistants(self) -> Assistants: return Assistants(self._client) @cached_property def threads(self) -> Threads: return Threads(self._client) @cached_property def with_raw_response(self) -> BetaWithRawResponse: return BetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> BetaWithStreamingResponse: return BetaWithStreamingResponse(self) class AsyncBeta(AsyncAPIResource): @cached_property def assistants(self) -> AsyncAssistants: return AsyncAssistants(self._client) @cached_property def threads(self) -> AsyncThreads: return AsyncThreads(self._client) @cached_property def with_raw_response(self) -> AsyncBetaWithRawResponse: return AsyncBetaWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncBetaWithStreamingResponse: return AsyncBetaWithStreamingResponse(self) class BetaWithRawResponse: def __init__(self, beta: Beta) -> None: self._beta = beta @cached_property def assistants(self) -> AssistantsWithRawResponse: return AssistantsWithRawResponse(self._beta.assistants) @cached_property def threads(self) -> ThreadsWithRawResponse: return ThreadsWithRawResponse(self._beta.threads) class AsyncBetaWithRawResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta @cached_property def assistants(self) -> AsyncAssistantsWithRawResponse: return AsyncAssistantsWithRawResponse(self._beta.assistants) @cached_property def threads(self) -> AsyncThreadsWithRawResponse: return AsyncThreadsWithRawResponse(self._beta.threads) class BetaWithStreamingResponse: def __init__(self, beta: Beta) -> None: self._beta = beta @cached_property def assistants(self) -> AssistantsWithStreamingResponse: return AssistantsWithStreamingResponse(self._beta.assistants) @cached_property def threads(self) -> ThreadsWithStreamingResponse: return ThreadsWithStreamingResponse(self._beta.threads) class AsyncBetaWithStreamingResponse: def __init__(self, beta: AsyncBeta) -> None: self._beta = beta @cached_property def assistants(self) -> AsyncAssistantsWithStreamingResponse: return AsyncAssistantsWithStreamingResponse(self._beta.assistants) @cached_property def threads(self) -> AsyncThreadsWithStreamingResponse: return AsyncThreadsWithStreamingResponse(self._beta.threads) openai-python-1.12.0/src/openai/resources/beta/threads/000077500000000000000000000000001456126711000230115ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/beta/threads/__init__.py000066400000000000000000000021701456126711000251220ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
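# A minimal usage sketch for the thread resources re-exported from this
# package (`client.beta.threads`, with `.messages` and `.runs`
# sub-resources). The assistant ID is a placeholder and an `OPENAI_API_KEY`
# environment variable is assumed to be set; see the individual resource
# modules below for the full parameter lists.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     # Create a thread and add a user message to it.
#     thread = client.beta.threads.create()
#     client.beta.threads.messages.create(
#         thread_id=thread.id,
#         role="user",
#         content="Summarize the attached report.",
#     )
#
#     # Start a run of an existing assistant against the thread.
#     run = client.beta.threads.runs.create(
#         thread_id=thread.id,
#         assistant_id="asst_...",  # placeholder assistant ID
#     )
#     print(run.status)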
from .runs import ( Runs, AsyncRuns, RunsWithRawResponse, AsyncRunsWithRawResponse, RunsWithStreamingResponse, AsyncRunsWithStreamingResponse, ) from .threads import ( Threads, AsyncThreads, ThreadsWithRawResponse, AsyncThreadsWithRawResponse, ThreadsWithStreamingResponse, AsyncThreadsWithStreamingResponse, ) from .messages import ( Messages, AsyncMessages, MessagesWithRawResponse, AsyncMessagesWithRawResponse, MessagesWithStreamingResponse, AsyncMessagesWithStreamingResponse, ) __all__ = [ "Runs", "AsyncRuns", "RunsWithRawResponse", "AsyncRunsWithRawResponse", "RunsWithStreamingResponse", "AsyncRunsWithStreamingResponse", "Messages", "AsyncMessages", "MessagesWithRawResponse", "AsyncMessagesWithRawResponse", "MessagesWithStreamingResponse", "AsyncMessagesWithStreamingResponse", "Threads", "AsyncThreads", "ThreadsWithRawResponse", "AsyncThreadsWithRawResponse", "ThreadsWithStreamingResponse", "AsyncThreadsWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/beta/threads/messages/000077500000000000000000000000001456126711000246205ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/beta/threads/messages/__init__.py000066400000000000000000000014261456126711000267340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .files import ( Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse, FilesWithStreamingResponse, AsyncFilesWithStreamingResponse, ) from .messages import ( Messages, AsyncMessages, MessagesWithRawResponse, AsyncMessagesWithRawResponse, MessagesWithStreamingResponse, AsyncMessagesWithStreamingResponse, ) __all__ = [ "Files", "AsyncFiles", "FilesWithRawResponse", "AsyncFilesWithRawResponse", "FilesWithStreamingResponse", "AsyncFilesWithStreamingResponse", "Messages", "AsyncMessages", "MessagesWithRawResponse", "AsyncMessagesWithRawResponse", "MessagesWithStreamingResponse", "AsyncMessagesWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/beta/threads/messages/files.py000066400000000000000000000277531456126711000263120ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal import httpx from ..... import _legacy_response from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ....._utils import maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import ( AsyncPaginator, make_request_options, ) from .....types.beta.threads.messages import MessageFile, file_list_params __all__ = ["Files", "AsyncFiles"] class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: return FilesWithStreamingResponse(self) def retrieve( self, file_id: str, *, thread_id: str, message_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> MessageFile: """ Retrieves a message file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/threads/{thread_id}/messages/{message_id}/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=MessageFile, ) def list( self, message_id: str, *, thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[MessageFile]: """Returns a list of message files. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/messages/{message_id}/files", page=SyncCursorPage[MessageFile], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, file_list_params.FileListParams, ), ), model=MessageFile, ) class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: return AsyncFilesWithStreamingResponse(self) async def retrieve( self, file_id: str, *, thread_id: str, message_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> MessageFile: """ Retrieves a message file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/threads/{thread_id}/messages/{message_id}/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=MessageFile, ) def list( self, message_id: str, *, thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[MessageFile, AsyncCursorPage[MessageFile]]: """Returns a list of message files. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/messages/{message_id}/files", page=AsyncCursorPage[MessageFile], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, file_list_params.FileListParams, ), ), model=MessageFile, ) class FilesWithRawResponse: def __init__(self, files: Files) -> None: self._files = files self.retrieve = _legacy_response.to_raw_response_wrapper( files.retrieve, ) self.list = _legacy_response.to_raw_response_wrapper( files.list, ) class AsyncFilesWithRawResponse: def __init__(self, files: AsyncFiles) -> None: self._files = files self.retrieve = _legacy_response.async_to_raw_response_wrapper( files.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( files.list, ) class FilesWithStreamingResponse: def __init__(self, files: Files) -> None: self._files = files self.retrieve = to_streamed_response_wrapper( files.retrieve, ) self.list = to_streamed_response_wrapper( files.list, ) class AsyncFilesWithStreamingResponse: def __init__(self, files: AsyncFiles) -> None: self._files = files self.retrieve = async_to_streamed_response_wrapper( files.retrieve, ) self.list = async_to_streamed_response_wrapper( files.list, ) openai-python-1.12.0/src/openai/resources/beta/threads/messages/messages.py000066400000000000000000000544571456126711000270200ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Optional from typing_extensions import Literal import httpx from ..... 
import _legacy_response from .files import ( Files, AsyncFiles, FilesWithRawResponse, AsyncFilesWithRawResponse, FilesWithStreamingResponse, AsyncFilesWithStreamingResponse, ) from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ....._utils import maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import ( AsyncPaginator, make_request_options, ) from .....types.beta.threads import ThreadMessage, message_list_params, message_create_params, message_update_params __all__ = ["Messages", "AsyncMessages"] class Messages(SyncAPIResource): @cached_property def files(self) -> Files: return Files(self._client) @cached_property def with_raw_response(self) -> MessagesWithRawResponse: return MessagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> MessagesWithStreamingResponse: return MessagesWithStreamingResponse(self) def create( self, thread_id: str, *, content: str, role: Literal["user"], file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadMessage: """ Create a message. Args: content: The content of the message. role: The role of the entity that is creating the message. Currently only `user` is supported. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}/messages", body=maybe_transform( { "content": content, "role": role, "file_ids": file_ids, "metadata": metadata, }, message_create_params.MessageCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadMessage, ) def retrieve( self, message_id: str, *, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadMessage: """ Retrieve a message. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/threads/{thread_id}/messages/{message_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadMessage, ) def update( self, message_id: str, *, thread_id: str, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadMessage: """ Modifies a message. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}/messages/{message_id}", body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadMessage, ) def list( self, thread_id: str, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[ThreadMessage]: """ Returns a list of messages for a given thread. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/messages", page=SyncCursorPage[ThreadMessage], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, message_list_params.MessageListParams, ), ), model=ThreadMessage, ) class AsyncMessages(AsyncAPIResource): @cached_property def files(self) -> AsyncFiles: return AsyncFiles(self._client) @cached_property def with_raw_response(self) -> AsyncMessagesWithRawResponse: return AsyncMessagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncMessagesWithStreamingResponse: return AsyncMessagesWithStreamingResponse(self) async def create( self, thread_id: str, *, content: str, role: Literal["user"], file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadMessage: """ Create a message. Args: content: The content of the message. role: The role of the entity that is creating the message. Currently only `user` is supported. file_ids: A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}/messages", body=maybe_transform( { "content": content, "role": role, "file_ids": file_ids, "metadata": metadata, }, message_create_params.MessageCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadMessage, ) async def retrieve( self, message_id: str, *, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadMessage: """ Retrieve a message. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/threads/{thread_id}/messages/{message_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadMessage, ) async def update( self, message_id: str, *, thread_id: str, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadMessage: """ Modifies a message. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not message_id: raise ValueError(f"Expected a non-empty value for `message_id` but received {message_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}/messages/{message_id}", body=maybe_transform({"metadata": metadata}, message_update_params.MessageUpdateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadMessage, ) def list( self, thread_id: str, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[ThreadMessage, AsyncCursorPage[ThreadMessage]]: """ Returns a list of messages for a given thread. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/messages", page=AsyncCursorPage[ThreadMessage], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, message_list_params.MessageListParams, ), ), model=ThreadMessage, ) class MessagesWithRawResponse: def __init__(self, messages: Messages) -> None: self._messages = messages self.create = _legacy_response.to_raw_response_wrapper( messages.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( messages.retrieve, ) self.update = _legacy_response.to_raw_response_wrapper( messages.update, ) self.list = _legacy_response.to_raw_response_wrapper( messages.list, ) @cached_property def files(self) -> FilesWithRawResponse: return FilesWithRawResponse(self._messages.files) class AsyncMessagesWithRawResponse: def __init__(self, messages: AsyncMessages) -> None: self._messages = messages self.create = _legacy_response.async_to_raw_response_wrapper( messages.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( messages.retrieve, ) self.update = _legacy_response.async_to_raw_response_wrapper( messages.update, ) self.list = _legacy_response.async_to_raw_response_wrapper( messages.list, ) @cached_property def files(self) -> AsyncFilesWithRawResponse: return AsyncFilesWithRawResponse(self._messages.files) class MessagesWithStreamingResponse: def __init__(self, messages: Messages) -> None: self._messages = messages self.create = to_streamed_response_wrapper( messages.create, ) self.retrieve = to_streamed_response_wrapper( messages.retrieve, ) self.update = to_streamed_response_wrapper( messages.update, ) self.list = to_streamed_response_wrapper( messages.list, ) @cached_property def files(self) -> FilesWithStreamingResponse: return FilesWithStreamingResponse(self._messages.files) class AsyncMessagesWithStreamingResponse: def __init__(self, messages: AsyncMessages) -> None: self._messages = messages self.create = async_to_streamed_response_wrapper( messages.create, ) self.retrieve = async_to_streamed_response_wrapper( messages.retrieve, ) self.update = async_to_streamed_response_wrapper( messages.update, ) self.list = async_to_streamed_response_wrapper( messages.list, ) @cached_property def files(self) -> AsyncFilesWithStreamingResponse: return AsyncFilesWithStreamingResponse(self._messages.files) openai-python-1.12.0/src/openai/resources/beta/threads/runs/000077500000000000000000000000001456126711000240005ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/beta/threads/runs/__init__.py000066400000000000000000000013421456126711000261110ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
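#
# Usage sketch (added for illustration, not part of the generated sources): the
# Messages and message Files resources defined in messages.py above are reached
# through `client.beta.threads.messages`. The IDs below are placeholders and the
# snippet is a minimal example, not a helper shipped with this package.
#
#     from openai import OpenAI
#
#     client = OpenAI()  # reads OPENAI_API_KEY from the environment
#
#     message = client.beta.threads.messages.create(
#         thread_id="thread_abc123",   # placeholder thread ID
#         role="user",
#         content="How does the pagination cursor work?",
#     )
#
#     # Iterating the cursor page fetches further pages automatically.
#     for msg in client.beta.threads.messages.list(thread_id="thread_abc123"):
#         print(msg.id, msg.role)
#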
from .runs import ( Runs, AsyncRuns, RunsWithRawResponse, AsyncRunsWithRawResponse, RunsWithStreamingResponse, AsyncRunsWithStreamingResponse, ) from .steps import ( Steps, AsyncSteps, StepsWithRawResponse, AsyncStepsWithRawResponse, StepsWithStreamingResponse, AsyncStepsWithStreamingResponse, ) __all__ = [ "Steps", "AsyncSteps", "StepsWithRawResponse", "AsyncStepsWithRawResponse", "StepsWithStreamingResponse", "AsyncStepsWithStreamingResponse", "Runs", "AsyncRuns", "RunsWithRawResponse", "AsyncRunsWithRawResponse", "RunsWithStreamingResponse", "AsyncRunsWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/beta/threads/runs/runs.py000066400000000000000000000770261456126711000253550ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Iterable, Optional from typing_extensions import Literal import httpx from ..... import _legacy_response from .steps import ( Steps, AsyncSteps, StepsWithRawResponse, AsyncStepsWithRawResponse, StepsWithStreamingResponse, AsyncStepsWithStreamingResponse, ) from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ....._utils import maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import ( AsyncPaginator, make_request_options, ) from .....types.beta.threads import ( Run, run_list_params, run_create_params, run_update_params, run_submit_tool_outputs_params, ) __all__ = ["Runs", "AsyncRuns"] class Runs(SyncAPIResource): @cached_property def steps(self) -> Steps: return Steps(self._client) @cached_property def with_raw_response(self) -> RunsWithRawResponse: return RunsWithRawResponse(self) @cached_property def with_streaming_response(self) -> RunsWithStreamingResponse: return RunsWithStreamingResponse(self) def create( self, thread_id: str, *, assistant_id: str, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Create a run. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. instructions: Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. 
Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}/runs", body=maybe_transform( { "assistant_id": assistant_id, "additional_instructions": additional_instructions, "instructions": instructions, "metadata": metadata, "model": model, "tools": tools, }, run_create_params.RunCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) def retrieve( self, run_id: str, *, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Retrieves a run. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/threads/{thread_id}/runs/{run_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) def update( self, run_id: str, *, thread_id: str, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Modifies a run. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}/runs/{run_id}", body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) def list( self, thread_id: str, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[Run]: """ Returns a list of runs belonging to a thread. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/runs", page=SyncCursorPage[Run], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, run_list_params.RunListParams, ), ), model=Run, ) def cancel( self, run_id: str, *, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Cancels a run that is `in_progress`. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}/runs/{run_id}/cancel", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) def submit_tool_outputs( self, run_id: str, *, thread_id: str, tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. Args: tool_outputs: A list of tools for which the outputs are being submitted. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", body=maybe_transform( {"tool_outputs": tool_outputs}, run_submit_tool_outputs_params.RunSubmitToolOutputsParams ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) class AsyncRuns(AsyncAPIResource): @cached_property def steps(self) -> AsyncSteps: return AsyncSteps(self._client) @cached_property def with_raw_response(self) -> AsyncRunsWithRawResponse: return AsyncRunsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncRunsWithStreamingResponse: return AsyncRunsWithStreamingResponse(self) async def create( self, thread_id: str, *, assistant_id: str, additional_instructions: Optional[str] | NotGiven = NOT_GIVEN, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Create a run. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. additional_instructions: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. instructions: Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}/runs", body=maybe_transform( { "assistant_id": assistant_id, "additional_instructions": additional_instructions, "instructions": instructions, "metadata": metadata, "model": model, "tools": tools, }, run_create_params.RunCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) async def retrieve( self, run_id: str, *, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Retrieves a run. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/threads/{thread_id}/runs/{run_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) async def update( self, run_id: str, *, thread_id: str, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Modifies a run. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}/runs/{run_id}", body=maybe_transform({"metadata": metadata}, run_update_params.RunUpdateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) def list( self, thread_id: str, *, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[Run, AsyncCursorPage[Run]]: """ Returns a list of runs belonging to a thread. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/runs", page=AsyncCursorPage[Run], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, run_list_params.RunListParams, ), ), model=Run, ) async def cancel( self, run_id: str, *, thread_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Cancels a run that is `in_progress`. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}/runs/{run_id}/cancel", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) async def submit_tool_outputs( self, run_id: str, *, thread_id: str, tool_outputs: Iterable[run_submit_tool_outputs_params.ToolOutput], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. Args: tool_outputs: A list of tools for which the outputs are being submitted. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}/runs/{run_id}/submit_tool_outputs", body=maybe_transform( {"tool_outputs": tool_outputs}, run_submit_tool_outputs_params.RunSubmitToolOutputsParams ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) class RunsWithRawResponse: def __init__(self, runs: Runs) -> None: self._runs = runs self.create = _legacy_response.to_raw_response_wrapper( runs.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( runs.retrieve, ) self.update = _legacy_response.to_raw_response_wrapper( runs.update, ) self.list = _legacy_response.to_raw_response_wrapper( runs.list, ) self.cancel = _legacy_response.to_raw_response_wrapper( runs.cancel, ) self.submit_tool_outputs = _legacy_response.to_raw_response_wrapper( runs.submit_tool_outputs, ) @cached_property def steps(self) -> StepsWithRawResponse: return StepsWithRawResponse(self._runs.steps) class AsyncRunsWithRawResponse: def __init__(self, runs: AsyncRuns) -> None: self._runs = runs self.create = _legacy_response.async_to_raw_response_wrapper( runs.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( runs.retrieve, ) self.update = _legacy_response.async_to_raw_response_wrapper( runs.update, ) self.list = _legacy_response.async_to_raw_response_wrapper( runs.list, ) self.cancel = _legacy_response.async_to_raw_response_wrapper( runs.cancel, ) self.submit_tool_outputs = _legacy_response.async_to_raw_response_wrapper( runs.submit_tool_outputs, ) @cached_property def steps(self) -> AsyncStepsWithRawResponse: return AsyncStepsWithRawResponse(self._runs.steps) class RunsWithStreamingResponse: def __init__(self, runs: Runs) -> None: self._runs = runs self.create = to_streamed_response_wrapper( runs.create, ) self.retrieve = to_streamed_response_wrapper( runs.retrieve, ) self.update = to_streamed_response_wrapper( runs.update, ) self.list = to_streamed_response_wrapper( runs.list, ) self.cancel = to_streamed_response_wrapper( runs.cancel, ) self.submit_tool_outputs = to_streamed_response_wrapper( runs.submit_tool_outputs, ) @cached_property def steps(self) -> StepsWithStreamingResponse: return StepsWithStreamingResponse(self._runs.steps) class AsyncRunsWithStreamingResponse: def __init__(self, runs: AsyncRuns) -> None: self._runs = runs self.create = async_to_streamed_response_wrapper( runs.create, ) self.retrieve = async_to_streamed_response_wrapper( runs.retrieve, ) self.update = async_to_streamed_response_wrapper( runs.update, ) self.list = async_to_streamed_response_wrapper( runs.list, ) self.cancel = async_to_streamed_response_wrapper( runs.cancel, ) self.submit_tool_outputs = async_to_streamed_response_wrapper( runs.submit_tool_outputs, ) @cached_property def steps(self) -> AsyncStepsWithStreamingResponse: return AsyncStepsWithStreamingResponse(self._runs.steps) 
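# Usage sketch (appended for illustration, not part of the generated module):
# the Runs resource above is reached through `client.beta.threads.runs`. The
# thread/assistant/tool-call IDs are placeholders, and the polling loop is a
# hand-rolled example rather than a helper provided by this file.
if __name__ == "__main__":
    import time

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Create a run for an existing thread and assistant (placeholder IDs).
    run = client.beta.threads.runs.create(
        thread_id="thread_abc123",
        assistant_id="asst_abc123",
    )

    # Poll until the run settles; `retrieve` takes the run ID plus the thread ID.
    while run.status in ("queued", "in_progress"):
        time.sleep(1)
        run = client.beta.threads.runs.retrieve(run.id, thread_id="thread_abc123")

    # If the assistant requested tool calls, submit all outputs in one request.
    if run.status == "requires_action":
        run = client.beta.threads.runs.submit_tool_outputs(
            run.id,
            thread_id="thread_abc123",
            tool_outputs=[{"tool_call_id": "call_abc123", "output": "42"}],
        )

    print(run.status)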
openai-python-1.12.0/src/openai/resources/beta/threads/runs/steps.py000066400000000000000000000275551456126711000255260ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal import httpx from ..... import _legacy_response from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ....._utils import maybe_transform from ....._compat import cached_property from ....._resource import SyncAPIResource, AsyncAPIResource from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .....pagination import SyncCursorPage, AsyncCursorPage from ....._base_client import ( AsyncPaginator, make_request_options, ) from .....types.beta.threads.runs import RunStep, step_list_params __all__ = ["Steps", "AsyncSteps"] class Steps(SyncAPIResource): @cached_property def with_raw_response(self) -> StepsWithRawResponse: return StepsWithRawResponse(self) @cached_property def with_streaming_response(self) -> StepsWithStreamingResponse: return StepsWithStreamingResponse(self) def retrieve( self, step_id: str, *, thread_id: str, run_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RunStep: """ Retrieves a run step. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") if not step_id: raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=RunStep, ) def list( self, run_id: str, *, thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[RunStep]: """ Returns a list of run steps belonging to a run. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/runs/{run_id}/steps", page=SyncCursorPage[RunStep], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, step_list_params.StepListParams, ), ), model=RunStep, ) class AsyncSteps(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncStepsWithRawResponse: return AsyncStepsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncStepsWithStreamingResponse: return AsyncStepsWithStreamingResponse(self) async def retrieve( self, step_id: str, *, thread_id: str, run_id: str, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RunStep: """ Retrieves a run step. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") if not step_id: raise ValueError(f"Expected a non-empty value for `step_id` but received {step_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/threads/{thread_id}/runs/{run_id}/steps/{step_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=RunStep, ) def list( self, run_id: str, *, thread_id: str, after: str | NotGiven = NOT_GIVEN, before: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[RunStep, AsyncCursorPage[RunStep]]: """ Returns a list of run steps belonging to a run. Args: after: A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. before: A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. limit: A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. order: Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") if not run_id: raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get_api_list( f"/threads/{thread_id}/runs/{run_id}/steps", page=AsyncCursorPage[RunStep], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "before": before, "limit": limit, "order": order, }, step_list_params.StepListParams, ), ), model=RunStep, ) class StepsWithRawResponse: def __init__(self, steps: Steps) -> None: self._steps = steps self.retrieve = _legacy_response.to_raw_response_wrapper( steps.retrieve, ) self.list = _legacy_response.to_raw_response_wrapper( steps.list, ) class AsyncStepsWithRawResponse: def __init__(self, steps: AsyncSteps) -> None: self._steps = steps self.retrieve = _legacy_response.async_to_raw_response_wrapper( steps.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( steps.list, ) class StepsWithStreamingResponse: def __init__(self, steps: Steps) -> None: self._steps = steps self.retrieve = to_streamed_response_wrapper( steps.retrieve, ) self.list = to_streamed_response_wrapper( steps.list, ) class AsyncStepsWithStreamingResponse: def __init__(self, steps: AsyncSteps) -> None: self._steps = steps self.retrieve = async_to_streamed_response_wrapper( steps.retrieve, ) self.list = async_to_streamed_response_wrapper( steps.list, ) openai-python-1.12.0/src/openai/resources/beta/threads/threads.py000066400000000000000000000612711456126711000250240ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Iterable, Optional import httpx from .... 
import _legacy_response from .runs import ( Runs, AsyncRuns, RunsWithRawResponse, AsyncRunsWithRawResponse, RunsWithStreamingResponse, AsyncRunsWithStreamingResponse, ) from .messages import ( Messages, AsyncMessages, MessagesWithRawResponse, AsyncMessagesWithRawResponse, MessagesWithStreamingResponse, AsyncMessagesWithStreamingResponse, ) from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ...._utils import maybe_transform from .runs.runs import Runs, AsyncRuns from ...._compat import cached_property from ...._resource import SyncAPIResource, AsyncAPIResource from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ....types.beta import ( Thread, ThreadDeleted, thread_create_params, thread_update_params, thread_create_and_run_params, ) from ...._base_client import ( make_request_options, ) from .messages.messages import Messages, AsyncMessages from ....types.beta.threads import Run __all__ = ["Threads", "AsyncThreads"] class Threads(SyncAPIResource): @cached_property def runs(self) -> Runs: return Runs(self._client) @cached_property def messages(self) -> Messages: return Messages(self._client) @cached_property def with_raw_response(self) -> ThreadsWithRawResponse: return ThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ThreadsWithStreamingResponse: return ThreadsWithStreamingResponse(self) def create( self, *, messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Thread: """ Create a thread. Args: messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( "/threads", body=maybe_transform( { "messages": messages, "metadata": metadata, }, thread_create_params.ThreadCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) def retrieve( self, thread_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Thread: """ Retrieves a thread. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._get( f"/threads/{thread_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) def update( self, thread_id: str, *, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Thread: """ Modifies a thread. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( f"/threads/{thread_id}", body=maybe_transform({"metadata": metadata}, thread_update_params.ThreadUpdateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) def delete( self, thread_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadDeleted: """ Delete a thread. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._delete( f"/threads/{thread_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadDeleted, ) def create_and_run( self, *, assistant_id: str, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Create a thread and run it in one request. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. thread: If no thread is provided, an empty thread will be created. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return self._post( "/threads/runs", body=maybe_transform( { "assistant_id": assistant_id, "instructions": instructions, "metadata": metadata, "model": model, "thread": thread, "tools": tools, }, thread_create_and_run_params.ThreadCreateAndRunParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) class AsyncThreads(AsyncAPIResource): @cached_property def runs(self) -> AsyncRuns: return AsyncRuns(self._client) @cached_property def messages(self) -> AsyncMessages: return AsyncMessages(self._client) @cached_property def with_raw_response(self) -> AsyncThreadsWithRawResponse: return AsyncThreadsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncThreadsWithStreamingResponse: return AsyncThreadsWithStreamingResponse(self) async def create( self, *, messages: Iterable[thread_create_params.Message] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Thread: """ Create a thread. Args: messages: A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( "/threads", body=maybe_transform( { "messages": messages, "metadata": metadata, }, thread_create_params.ThreadCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) async def retrieve( self, thread_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Thread: """ Retrieves a thread. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._get( f"/threads/{thread_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) async def update( self, thread_id: str, *, metadata: Optional[object] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Thread: """ Modifies a thread. Args: metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( f"/threads/{thread_id}", body=maybe_transform({"metadata": metadata}, thread_update_params.ThreadUpdateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Thread, ) async def delete( self, thread_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ThreadDeleted: """ Delete a thread. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not thread_id: raise ValueError(f"Expected a non-empty value for `thread_id` but received {thread_id!r}") extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._delete( f"/threads/{thread_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ThreadDeleted, ) async def create_and_run( self, *, assistant_id: str, instructions: Optional[str] | NotGiven = NOT_GIVEN, metadata: Optional[object] | NotGiven = NOT_GIVEN, model: Optional[str] | NotGiven = NOT_GIVEN, thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN, tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Run: """ Create a thread and run it in one request. Args: assistant_id: The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. instructions: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. model: The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. thread: If no thread is provided, an empty thread will be created. tools: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ extra_headers = {"OpenAI-Beta": "assistants=v1", **(extra_headers or {})} return await self._post( "/threads/runs", body=maybe_transform( { "assistant_id": assistant_id, "instructions": instructions, "metadata": metadata, "model": model, "thread": thread, "tools": tools, }, thread_create_and_run_params.ThreadCreateAndRunParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Run, ) class ThreadsWithRawResponse: def __init__(self, threads: Threads) -> None: self._threads = threads self.create = _legacy_response.to_raw_response_wrapper( threads.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( threads.retrieve, ) self.update = _legacy_response.to_raw_response_wrapper( threads.update, ) self.delete = _legacy_response.to_raw_response_wrapper( threads.delete, ) self.create_and_run = _legacy_response.to_raw_response_wrapper( threads.create_and_run, ) @cached_property def runs(self) -> RunsWithRawResponse: return RunsWithRawResponse(self._threads.runs) @cached_property def messages(self) -> MessagesWithRawResponse: return MessagesWithRawResponse(self._threads.messages) class AsyncThreadsWithRawResponse: def __init__(self, threads: AsyncThreads) -> None: self._threads = threads self.create = _legacy_response.async_to_raw_response_wrapper( threads.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( threads.retrieve, ) self.update = _legacy_response.async_to_raw_response_wrapper( threads.update, ) self.delete = _legacy_response.async_to_raw_response_wrapper( threads.delete, ) self.create_and_run = _legacy_response.async_to_raw_response_wrapper( threads.create_and_run, ) @cached_property def runs(self) -> AsyncRunsWithRawResponse: return AsyncRunsWithRawResponse(self._threads.runs) @cached_property def messages(self) -> AsyncMessagesWithRawResponse: return AsyncMessagesWithRawResponse(self._threads.messages) class ThreadsWithStreamingResponse: def __init__(self, threads: Threads) -> None: self._threads = threads self.create = to_streamed_response_wrapper( threads.create, ) self.retrieve = to_streamed_response_wrapper( threads.retrieve, ) self.update = to_streamed_response_wrapper( threads.update, ) self.delete = to_streamed_response_wrapper( threads.delete, ) self.create_and_run = to_streamed_response_wrapper( threads.create_and_run, ) @cached_property def runs(self) -> RunsWithStreamingResponse: return RunsWithStreamingResponse(self._threads.runs) @cached_property def messages(self) -> MessagesWithStreamingResponse: return MessagesWithStreamingResponse(self._threads.messages) class AsyncThreadsWithStreamingResponse: def __init__(self, threads: AsyncThreads) -> None: self._threads = threads self.create = async_to_streamed_response_wrapper( threads.create, ) self.retrieve = async_to_streamed_response_wrapper( threads.retrieve, ) self.update = async_to_streamed_response_wrapper( threads.update, ) self.delete = async_to_streamed_response_wrapper( threads.delete, ) self.create_and_run = async_to_streamed_response_wrapper( threads.create_and_run, ) @cached_property def runs(self) -> AsyncRunsWithStreamingResponse: return AsyncRunsWithStreamingResponse(self._threads.runs) @cached_property def messages(self) -> AsyncMessagesWithStreamingResponse: 
return AsyncMessagesWithStreamingResponse(self._threads.messages) openai-python-1.12.0/src/openai/resources/chat/000077500000000000000000000000001456126711000213635ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/chat/__init__.py000066400000000000000000000014601456126711000234750ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .chat import ( Chat, AsyncChat, ChatWithRawResponse, AsyncChatWithRawResponse, ChatWithStreamingResponse, AsyncChatWithStreamingResponse, ) from .completions import ( Completions, AsyncCompletions, CompletionsWithRawResponse, AsyncCompletionsWithRawResponse, CompletionsWithStreamingResponse, AsyncCompletionsWithStreamingResponse, ) __all__ = [ "Completions", "AsyncCompletions", "CompletionsWithRawResponse", "AsyncCompletionsWithRawResponse", "CompletionsWithStreamingResponse", "AsyncCompletionsWithStreamingResponse", "Chat", "AsyncChat", "ChatWithRawResponse", "AsyncChatWithRawResponse", "ChatWithStreamingResponse", "AsyncChatWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/chat/chat.py000066400000000000000000000044051456126711000226570ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from .completions import ( Completions, AsyncCompletions, CompletionsWithRawResponse, AsyncCompletionsWithRawResponse, CompletionsWithStreamingResponse, AsyncCompletionsWithStreamingResponse, ) __all__ = ["Chat", "AsyncChat"] class Chat(SyncAPIResource): @cached_property def completions(self) -> Completions: return Completions(self._client) @cached_property def with_raw_response(self) -> ChatWithRawResponse: return ChatWithRawResponse(self) @cached_property def with_streaming_response(self) -> ChatWithStreamingResponse: return ChatWithStreamingResponse(self) class AsyncChat(AsyncAPIResource): @cached_property def completions(self) -> AsyncCompletions: return AsyncCompletions(self._client) @cached_property def with_raw_response(self) -> AsyncChatWithRawResponse: return AsyncChatWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncChatWithStreamingResponse: return AsyncChatWithStreamingResponse(self) class ChatWithRawResponse: def __init__(self, chat: Chat) -> None: self._chat = chat @cached_property def completions(self) -> CompletionsWithRawResponse: return CompletionsWithRawResponse(self._chat.completions) class AsyncChatWithRawResponse: def __init__(self, chat: AsyncChat) -> None: self._chat = chat @cached_property def completions(self) -> AsyncCompletionsWithRawResponse: return AsyncCompletionsWithRawResponse(self._chat.completions) class ChatWithStreamingResponse: def __init__(self, chat: Chat) -> None: self._chat = chat @cached_property def completions(self) -> CompletionsWithStreamingResponse: return CompletionsWithStreamingResponse(self._chat.completions) class AsyncChatWithStreamingResponse: def __init__(self, chat: AsyncChat) -> None: self._chat = chat @cached_property def completions(self) -> AsyncCompletionsWithStreamingResponse: return AsyncCompletionsWithStreamingResponse(self._chat.completions) openai-python-1.12.0/src/openai/resources/chat/completions.py000066400000000000000000002146561456126711000243070ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
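# --------------------------------------------------------------------------
# Editor's note: illustrative usage sketch, not part of the generated file.
# The `create` methods defined below are normally reached through the public
# client as `client.chat.completions.create(...)`. A minimal, hedged example,
# assuming an `OPENAI_API_KEY` environment variable is configured:
#
#     from openai import OpenAI
#
#     client = OpenAI()  # reads OPENAI_API_KEY from the environment
#     completion = client.chat.completions.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": "Say hello"}],
#     )
#     print(completion.choices[0].message.content)
# --------------------------------------------------------------------------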
from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional, overload from typing_extensions import Literal import httpx from ... import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import required_args, maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..._streaming import Stream, AsyncStream from ...types.chat import ( ChatCompletion, ChatCompletionChunk, ChatCompletionToolParam, ChatCompletionMessageParam, ChatCompletionToolChoiceOptionParam, completion_create_params, ) from ..._base_client import ( make_request_options, ) __all__ = ["Completions", "AsyncCompletions"] class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: return CompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CompletionsWithStreamingResponse: return CompletionsWithStreamingResponse(self) @overload def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: """ Creates a model response for the given chat conversation. Args: messages: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. tool_choice: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... 
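    # Editor's note: hedged sketch of the non-streaming call documented in the
    # overload above (not part of the generated file). Parameter values are
    # illustrative only; note the docstring's requirement to also instruct the
    # model to produce JSON when using `response_format={"type": "json_object"}`:
    #
    #     completion = client.chat.completions.create(
    #         model="gpt-3.5-turbo-1106",
    #         response_format={"type": "json_object"},
    #         messages=[
    #             {"role": "system", "content": "Reply with a JSON object."},
    #             {"role": "user", "content": "List three primary colors."},
    #         ],
    #     )
    #     print(completion.choices[0].message.content)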
@overload def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: Literal[True], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. Args: messages: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. 
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. tool_choice: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. 
Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: bool, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. Args: messages: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). model: ID of the model to use. 
See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. tool_choice: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... 
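    # Editor's note: hedged streaming sketch for the `stream=True` overloads
    # above (not part of the generated file). With `stream=True` the call
    # returns a `Stream[ChatCompletionChunk]` that yields deltas as tokens
    # arrive; a chunk's `delta.content` may be None, hence the guard:
    #
    #     stream = client.chat.completions.create(
    #         model="gpt-3.5-turbo",
    #         messages=[{"role": "user", "content": "Count to five."}],
    #         stream=True,
    #     )
    #     for chunk in stream:
    #         if chunk.choices and chunk.choices[0].delta.content is not None:
    #             print(chunk.choices[0].delta.content, end="")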
@required_args(["messages", "model"], ["messages", "model", "stream"]) def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | Stream[ChatCompletionChunk]: return self._post( "/chat/completions", body=maybe_transform( { "messages": messages, "model": model, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, "stop": stop, "stream": stream, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, }, completion_create_params.CompletionCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ChatCompletion, stream=stream or False, stream_cls=Stream[ChatCompletionChunk], ) class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: return AsyncCompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: return AsyncCompletionsWithStreamingResponse(self) @overload async def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion: """ Creates a model response for the given chat conversation. Args: messages: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). model: ID of the model to use. 
See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. tool_choice: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... 
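    # Editor's note: hedged async sketch for the overload above (not part of
    # the generated file). `AsyncCompletions.create` is awaited from an
    # `AsyncOpenAI` client inside a coroutine; values are illustrative only:
    #
    #     import asyncio
    #     from openai import AsyncOpenAI
    #
    #     async def main() -> None:
    #         client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    #         completion = await client.chat.completions.create(
    #             model="gpt-3.5-turbo",
    #             messages=[{"role": "user", "content": "Say hello"}],
    #         )
    #         print(completion.choices[0].message.content)
    #
    #     asyncio.run(main())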
@overload async def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: Literal[True], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. Args: messages: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). model: ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. 
functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. tool_choice: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. 
Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload async def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], stream: bool, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: """ Creates a model response for the given chat conversation. Args: messages: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). model: ID of the model to use. 
See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) function_call: Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. functions: Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. logprobs: Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) response_format: An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. tool_choice: Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. tools: A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. top_logprobs: An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... 
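# Usage sketch (not part of the generated client code): consuming the streaming variant of
# this endpoint.  With `stream=True` the awaited call returns an AsyncStream of
# ChatCompletionChunk objects whose deltas carry incremental content.  Assumes
# OPENAI_API_KEY is set in the environment; model and prompt are placeholders.
import asyncio

from openai import AsyncOpenAI


async def example_stream() -> None:
    client = AsyncOpenAI()
    stream = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Write a one-line haiku about the sea."}],
        stream=True,
    )
    async for chunk in stream:
        # Deltas may omit content (e.g. role-only or final chunks), so guard before printing.
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")
    print()


if __name__ == "__main__":
    asyncio.run(example_stream())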
@required_args(["messages", "model"], ["messages", "model", "stream"]) async def create( self, *, messages: Iterable[ChatCompletionMessageParam], model: Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ], frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN, functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[bool] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN, tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN, top_logprobs: Optional[int] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]: return await self._post( "/chat/completions", body=maybe_transform( { "messages": messages, "model": model, "frequency_penalty": frequency_penalty, "function_call": function_call, "functions": functions, "logit_bias": logit_bias, "logprobs": logprobs, "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, "response_format": response_format, "seed": seed, "stop": stop, "stream": stream, "temperature": temperature, "tool_choice": tool_choice, "tools": tools, "top_logprobs": top_logprobs, "top_p": top_p, "user": user, }, completion_create_params.CompletionCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ChatCompletion, stream=stream or False, stream_cls=AsyncStream[ChatCompletionChunk], ) class CompletionsWithRawResponse: def __init__(self, completions: Completions) -> None: self._completions = completions self.create = _legacy_response.to_raw_response_wrapper( completions.create, ) class AsyncCompletionsWithRawResponse: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions self.create = _legacy_response.async_to_raw_response_wrapper( completions.create, ) class CompletionsWithStreamingResponse: def __init__(self, completions: Completions) -> None: self._completions = completions self.create = to_streamed_response_wrapper( completions.create, ) class AsyncCompletionsWithStreamingResponse: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions self.create = async_to_streamed_response_wrapper( completions.create, ) openai-python-1.12.0/src/openai/resources/completions.py000066400000000000000000001564121456126711000233630ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional, overload from typing_extensions import Literal import httpx from .. 
import _legacy_response from ..types import Completion, completion_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import required_args, maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._streaming import Stream, AsyncStream from .._base_client import ( make_request_options, ) __all__ = ["Completions", "AsyncCompletions"] class Completions(SyncAPIResource): @cached_property def with_raw_response(self) -> CompletionsWithRawResponse: return CompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> CompletionsWithStreamingResponse: return CompletionsWithStreamingResponse(self) @overload def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion: """ Creates a completion for the provided prompt and parameters. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. best_of: Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: Echo back the prompt in addition to the completion frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. stream: Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). suffix: The suffix that comes after a completion of inserted text. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. 
top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Stream[Completion]: """ Creates a completion for the provided prompt and parameters. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. stream: Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). best_of: Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. 
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: Echo back the prompt in addition to the completion frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. suffix: The suffix that comes after a completion of inserted text. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. 
user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | Stream[Completion]: """ Creates a completion for the provided prompt and parameters. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. stream: Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). best_of: Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: Echo back the prompt in addition to the completion frequency_penalty: Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. suffix: The suffix that comes after a completion of inserted text. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @required_args(["model", "prompt"], ["model", "prompt", "stream"]) def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | Stream[Completion]: return self._post( "/completions", body=maybe_transform( { "model": model, "prompt": prompt, "best_of": best_of, "echo": echo, "frequency_penalty": frequency_penalty, "logit_bias": logit_bias, "logprobs": logprobs, "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, "seed": seed, "stop": stop, "stream": stream, "suffix": suffix, "temperature": temperature, "top_p": top_p, "user": user, }, completion_create_params.CompletionCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Completion, stream=stream or False, stream_cls=Stream[Completion], ) class AsyncCompletions(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncCompletionsWithRawResponse: return AsyncCompletionsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse: return AsyncCompletionsWithStreamingResponse(self) @overload async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: 
Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion: """ Creates a completion for the provided prompt and parameters. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. best_of: Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: Echo back the prompt in addition to the completion frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many completions to generate for each prompt. 
**Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. stream: Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). suffix: The suffix that comes after a completion of inserted text. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: Literal[True], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncStream[Completion]: """ Creates a completion for the provided prompt and parameters. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. stream: Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). best_of: Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: Echo back the prompt in addition to the completion frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. n: How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. suffix: The suffix that comes after a completion of inserted text. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @overload async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], stream: bool, best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | AsyncStream[Completion]: """ Creates a completion for the provided prompt and parameters. Args: model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. stream: Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). best_of: Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. echo: Echo back the prompt in addition to the completion frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) logit_bias: Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. 
n: How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. suffix: The suffix that comes after a completion of inserted text. temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. top_p: An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ ... @required_args(["model", "prompt"], ["model", "prompt", "stream"]) async def create( self, *, model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]], prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None], best_of: Optional[int] | NotGiven = NOT_GIVEN, echo: Optional[bool] | NotGiven = NOT_GIVEN, frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN, logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN, logprobs: Optional[int] | NotGiven = NOT_GIVEN, max_tokens: Optional[int] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
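# Usage sketch (illustrative): a minimal async streaming call against the
# completions endpoint implemented below, assuming `OPENAI_API_KEY` is set in
# the environment. The model name and prompt are placeholders.
#
#     import asyncio
#     from openai import AsyncOpenAI
#
#     async def main() -> None:
#         client = AsyncOpenAI()  # picks up OPENAI_API_KEY from the environment
#         stream = await client.completions.create(
#             model="gpt-3.5-turbo-instruct",
#             prompt="Say this is a test",
#             stream=True,
#             max_tokens=32,
#         )
#         async for chunk in stream:
#             # each chunk is a Completion whose partial text is in choices[0].text
#             print(chunk.choices[0].text, end="", flush=True)
#
#     asyncio.run(main())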
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Completion | AsyncStream[Completion]: return await self._post( "/completions", body=maybe_transform( { "model": model, "prompt": prompt, "best_of": best_of, "echo": echo, "frequency_penalty": frequency_penalty, "logit_bias": logit_bias, "logprobs": logprobs, "max_tokens": max_tokens, "n": n, "presence_penalty": presence_penalty, "seed": seed, "stop": stop, "stream": stream, "suffix": suffix, "temperature": temperature, "top_p": top_p, "user": user, }, completion_create_params.CompletionCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Completion, stream=stream or False, stream_cls=AsyncStream[Completion], ) class CompletionsWithRawResponse: def __init__(self, completions: Completions) -> None: self._completions = completions self.create = _legacy_response.to_raw_response_wrapper( completions.create, ) class AsyncCompletionsWithRawResponse: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions self.create = _legacy_response.async_to_raw_response_wrapper( completions.create, ) class CompletionsWithStreamingResponse: def __init__(self, completions: Completions) -> None: self._completions = completions self.create = to_streamed_response_wrapper( completions.create, ) class AsyncCompletionsWithStreamingResponse: def __init__(self, completions: AsyncCompletions) -> None: self._completions = completions self.create = async_to_streamed_response_wrapper( completions.create, ) openai-python-1.12.0/src/openai/resources/embeddings.py000066400000000000000000000246651456126711000231340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import base64 from typing import List, Union, Iterable, cast from typing_extensions import Literal import httpx from .. import _legacy_response from ..types import CreateEmbeddingResponse, embedding_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import is_given, maybe_transform from .._compat import cached_property from .._extras import numpy as np, has_numpy from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import ( make_request_options, ) __all__ = ["Embeddings", "AsyncEmbeddings"] class Embeddings(SyncAPIResource): @cached_property def with_raw_response(self) -> EmbeddingsWithRawResponse: return EmbeddingsWithRawResponse(self) @cached_property def with_streaming_response(self) -> EmbeddingsWithStreamingResponse: return EmbeddingsWithStreamingResponse(self) def create( self, *, input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CreateEmbeddingResponse: """ Creates an embedding vector representing the input text. Args: input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. dimensions: The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. encoding_format: The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ params = { "input": input, "model": model, "user": user, "dimensions": dimensions, "encoding_format": encoding_format, } if not is_given(encoding_format) and has_numpy(): params["encoding_format"] = "base64" def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: if is_given(encoding_format): # don't modify the response object if a user explicitly asked for a format return obj for embedding in obj.data: data = cast(object, embedding.embedding) if not isinstance(data, str): # numpy is not installed / base64 optimisation isn't enabled for this model yet continue embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call] base64.b64decode(data), dtype="float32" ).tolist() return obj return self._post( "/embeddings", body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, post_parser=parser, ), cast_to=CreateEmbeddingResponse, ) class AsyncEmbeddings(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncEmbeddingsWithRawResponse: return AsyncEmbeddingsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncEmbeddingsWithStreamingResponse: return AsyncEmbeddingsWithStreamingResponse(self) async def create( self, *, input: Union[str, List[str], Iterable[int], Iterable[Iterable[int]]], model: Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]], dimensions: int | NotGiven = NOT_GIVEN, encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
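# Usage sketch (illustrative): creating an embedding with the synchronous
# client, assuming `OPENAI_API_KEY` is set. When numpy is installed and no
# `encoding_format` is passed, the request above is sent with base64 encoding
# and decoded back to floats by the post parser, so callers still receive a
# plain list of floats.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     response = client.embeddings.create(
#         model="text-embedding-3-small",
#         input="The quick brown fox jumped over the lazy dog",
#     )
#     vector = response.data[0].embedding  # list[float]
#     print(len(vector))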
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> CreateEmbeddingResponse: """ Creates an embedding vector representing the input text. Args: input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. dimensions: The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. encoding_format: The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ params = { "input": input, "model": model, "user": user, "dimensions": dimensions, "encoding_format": encoding_format, } if not is_given(encoding_format) and has_numpy(): params["encoding_format"] = "base64" def parser(obj: CreateEmbeddingResponse) -> CreateEmbeddingResponse: if is_given(encoding_format): # don't modify the response object if a user explicitly asked for a format return obj for embedding in obj.data: data = cast(object, embedding.embedding) if not isinstance(data, str): # numpy is not installed / base64 optimisation isn't enabled for this model yet continue embedding.embedding = np.frombuffer( # type: ignore[no-untyped-call] base64.b64decode(data), dtype="float32" ).tolist() return obj return await self._post( "/embeddings", body=maybe_transform(params, embedding_create_params.EmbeddingCreateParams), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, post_parser=parser, ), cast_to=CreateEmbeddingResponse, ) class EmbeddingsWithRawResponse: def __init__(self, embeddings: Embeddings) -> None: self._embeddings = embeddings self.create = _legacy_response.to_raw_response_wrapper( embeddings.create, ) class AsyncEmbeddingsWithRawResponse: def __init__(self, embeddings: AsyncEmbeddings) -> None: self._embeddings = embeddings self.create = _legacy_response.async_to_raw_response_wrapper( embeddings.create, ) class EmbeddingsWithStreamingResponse: def __init__(self, embeddings: Embeddings) -> None: self._embeddings = embeddings self.create = to_streamed_response_wrapper( embeddings.create, ) class AsyncEmbeddingsWithStreamingResponse: def __init__(self, embeddings: AsyncEmbeddings) -> None: self._embeddings = embeddings self.create = 
async_to_streamed_response_wrapper( embeddings.create, ) openai-python-1.12.0/src/openai/resources/files.py000066400000000000000000000626141456126711000221310ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import time import typing_extensions from typing import Mapping, cast from typing_extensions import Literal import httpx from .. import _legacy_response from ..types import FileObject, FileDeleted, file_list_params, file_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from .._utils import extract_files, maybe_transform, deepcopy_minimal from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import ( StreamedBinaryAPIResponse, AsyncStreamedBinaryAPIResponse, to_streamed_response_wrapper, async_to_streamed_response_wrapper, to_custom_streamed_response_wrapper, async_to_custom_streamed_response_wrapper, ) from ..pagination import SyncPage, AsyncPage from .._base_client import ( AsyncPaginator, make_request_options, ) __all__ = ["Files", "AsyncFiles"] class Files(SyncAPIResource): @cached_property def with_raw_response(self) -> FilesWithRawResponse: return FilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> FilesWithStreamingResponse: return FilesWithStreamingResponse(self) def create( self, *, file: FileTypes, purpose: Literal["fine-tune", "assistants"], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. Please [contact us](https://help.openai.com/) if you need to increase these storage limits. Args: file: The File object (not file name) to be uploaded. purpose: The intended purpose of the uploaded file. Use "fine-tune" for [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and "assistants" for [Assistants](https://platform.openai.com/docs/api-reference/assistants) and [Messages](https://platform.openai.com/docs/api-reference/messages). This allows us to validate the format of the uploaded file is correct for fine-tuning. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "file": file, "purpose": purpose, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/files", body=maybe_transform(body, file_create_params.FileCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileObject, ) def retrieve( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """ Returns information about a specific file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") return self._get( f"/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileObject, ) def list( self, *, purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncPage[FileObject]: """ Returns a list of files that belong to the user's organization. Args: purpose: Only return files with the given purpose. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._get_api_list( "/files", page=SyncPage[FileObject], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), ), model=FileObject, ) def delete( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileDeleted: """ Delete a file. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") return self._delete( f"/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileDeleted, ) def content( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> _legacy_response.HttpxBinaryResponseContent: """ Returns the contents of the specified file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") return self._get( f"/files/{file_id}/content", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=_legacy_response.HttpxBinaryResponseContent, ) @typing_extensions.deprecated("The `.content()` method should be used instead") def retrieve_content( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> str: """ Returns the contents of the specified file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"Accept": "application/json", **(extra_headers or {})} return self._get( f"/files/{file_id}/content", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=str, ) def wait_for_processing( self, id: str, *, poll_interval: float = 5.0, max_wait_seconds: float = 30 * 60, ) -> FileObject: """Waits for the given file to be processed, default timeout is 30 mins.""" TERMINAL_STATES = {"processed", "error", "deleted"} start = time.time() file = self.retrieve(id) while file.status not in TERMINAL_STATES: self._sleep(poll_interval) file = self.retrieve(id) if time.time() - start > max_wait_seconds: raise RuntimeError( f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds." 
) return file class AsyncFiles(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncFilesWithRawResponse: return AsyncFilesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFilesWithStreamingResponse: return AsyncFilesWithStreamingResponse(self) async def create( self, *, file: FileTypes, purpose: Literal["fine-tune", "assistants"], # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB. The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See the [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. Please [contact us](https://help.openai.com/) if you need to increase these storage limits. Args: file: The File object (not file name) to be uploaded. purpose: The intended purpose of the uploaded file. Use "fine-tune" for [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and "assistants" for [Assistants](https://platform.openai.com/docs/api-reference/assistants) and [Messages](https://platform.openai.com/docs/api-reference/messages). This allows us to validate the format of the uploaded file is correct for fine-tuning. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "file": file, "purpose": purpose, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["file"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. # multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/files", body=maybe_transform(body, file_create_params.FileCreateParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileObject, ) async def retrieve( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileObject: """ Returns information about a specific file. 
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") return await self._get( f"/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileObject, ) def list( self, *, purpose: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FileObject, AsyncPage[FileObject]]: """ Returns a list of files that belong to the user's organization. Args: purpose: Only return files with the given purpose. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._get_api_list( "/files", page=AsyncPage[FileObject], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform({"purpose": purpose}, file_list_params.FileListParams), ), model=FileObject, ) async def delete( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FileDeleted: """ Delete a file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") return await self._delete( f"/files/{file_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FileDeleted, ) async def content( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> _legacy_response.HttpxBinaryResponseContent: """ Returns the contents of the specified file. 
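# Usage sketch (illustrative): downloading a file's bytes with the async
# client. The file ID is a placeholder, and the `write_to_file` helper on the
# binary response object is assumed here; `.content` exposes the raw bytes if
# preferred.
#
#     import asyncio
#     from openai import AsyncOpenAI
#
#     async def download(file_id: str) -> None:
#         client = AsyncOpenAI()
#         binary = await client.files.content(file_id)
#         binary.write_to_file("results.jsonl")  # assumed helper on HttpxBinaryResponseContent
#
#     asyncio.run(download("file-abc123"))  # placeholder file ID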
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") return await self._get( f"/files/{file_id}/content", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=_legacy_response.HttpxBinaryResponseContent, ) @typing_extensions.deprecated("The `.content()` method should be used instead") async def retrieve_content( self, file_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> str: """ Returns the contents of the specified file. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not file_id: raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}") extra_headers = {"Accept": "application/json", **(extra_headers or {})} return await self._get( f"/files/{file_id}/content", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=str, ) async def wait_for_processing( self, id: str, *, poll_interval: float = 5.0, max_wait_seconds: float = 30 * 60, ) -> FileObject: """Waits for the given file to be processed, default timeout is 30 mins.""" TERMINAL_STATES = {"processed", "error", "deleted"} start = time.time() file = await self.retrieve(id) while file.status not in TERMINAL_STATES: await self._sleep(poll_interval) file = await self.retrieve(id) if time.time() - start > max_wait_seconds: raise RuntimeError( f"Giving up on waiting for file {id} to finish processing after {max_wait_seconds} seconds." 
) return file class FilesWithRawResponse: def __init__(self, files: Files) -> None: self._files = files self.create = _legacy_response.to_raw_response_wrapper( files.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( files.retrieve, ) self.list = _legacy_response.to_raw_response_wrapper( files.list, ) self.delete = _legacy_response.to_raw_response_wrapper( files.delete, ) self.content = _legacy_response.to_raw_response_wrapper( files.content, ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] _legacy_response.to_raw_response_wrapper( files.retrieve_content # pyright: ignore[reportDeprecated], ) ) class AsyncFilesWithRawResponse: def __init__(self, files: AsyncFiles) -> None: self._files = files self.create = _legacy_response.async_to_raw_response_wrapper( files.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( files.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( files.list, ) self.delete = _legacy_response.async_to_raw_response_wrapper( files.delete, ) self.content = _legacy_response.async_to_raw_response_wrapper( files.content, ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( files.retrieve_content # pyright: ignore[reportDeprecated], ) ) class FilesWithStreamingResponse: def __init__(self, files: Files) -> None: self._files = files self.create = to_streamed_response_wrapper( files.create, ) self.retrieve = to_streamed_response_wrapper( files.retrieve, ) self.list = to_streamed_response_wrapper( files.list, ) self.delete = to_streamed_response_wrapper( files.delete, ) self.content = to_custom_streamed_response_wrapper( files.content, StreamedBinaryAPIResponse, ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] to_streamed_response_wrapper( files.retrieve_content # pyright: ignore[reportDeprecated], ) ) class AsyncFilesWithStreamingResponse: def __init__(self, files: AsyncFiles) -> None: self._files = files self.create = async_to_streamed_response_wrapper( files.create, ) self.retrieve = async_to_streamed_response_wrapper( files.retrieve, ) self.list = async_to_streamed_response_wrapper( files.list, ) self.delete = async_to_streamed_response_wrapper( files.delete, ) self.content = async_to_custom_streamed_response_wrapper( files.content, AsyncStreamedBinaryAPIResponse, ) self.retrieve_content = ( # pyright: ignore[reportDeprecated] async_to_streamed_response_wrapper( files.retrieve_content # pyright: ignore[reportDeprecated], ) ) openai-python-1.12.0/src/openai/resources/fine_tuning/000077500000000000000000000000001456126711000227515ustar00rootroot00000000000000openai-python-1.12.0/src/openai/resources/fine_tuning/__init__.py000066400000000000000000000014441456126711000250650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
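# Usage sketch (illustrative): uploading a fine-tuning dataset with the
# synchronous client and blocking until it is processed via the
# `wait_for_processing` helper defined above. The filename is a placeholder
# and `OPENAI_API_KEY` is assumed to be set in the environment.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     uploaded = client.files.create(
#         file=open("training_data.jsonl", "rb"),
#         purpose="fine-tune",
#     )
#     processed = client.files.wait_for_processing(uploaded.id)
#     print(processed.id, processed.status)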
from .jobs import ( Jobs, AsyncJobs, JobsWithRawResponse, AsyncJobsWithRawResponse, JobsWithStreamingResponse, AsyncJobsWithStreamingResponse, ) from .fine_tuning import ( FineTuning, AsyncFineTuning, FineTuningWithRawResponse, AsyncFineTuningWithRawResponse, FineTuningWithStreamingResponse, AsyncFineTuningWithStreamingResponse, ) __all__ = [ "Jobs", "AsyncJobs", "JobsWithRawResponse", "AsyncJobsWithRawResponse", "JobsWithStreamingResponse", "AsyncJobsWithStreamingResponse", "FineTuning", "AsyncFineTuning", "FineTuningWithRawResponse", "AsyncFineTuningWithRawResponse", "FineTuningWithStreamingResponse", "AsyncFineTuningWithStreamingResponse", ] openai-python-1.12.0/src/openai/resources/fine_tuning/fine_tuning.py000066400000000000000000000044421456126711000256340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .jobs import ( Jobs, AsyncJobs, JobsWithRawResponse, AsyncJobsWithRawResponse, JobsWithStreamingResponse, AsyncJobsWithStreamingResponse, ) from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource __all__ = ["FineTuning", "AsyncFineTuning"] class FineTuning(SyncAPIResource): @cached_property def jobs(self) -> Jobs: return Jobs(self._client) @cached_property def with_raw_response(self) -> FineTuningWithRawResponse: return FineTuningWithRawResponse(self) @cached_property def with_streaming_response(self) -> FineTuningWithStreamingResponse: return FineTuningWithStreamingResponse(self) class AsyncFineTuning(AsyncAPIResource): @cached_property def jobs(self) -> AsyncJobs: return AsyncJobs(self._client) @cached_property def with_raw_response(self) -> AsyncFineTuningWithRawResponse: return AsyncFineTuningWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse: return AsyncFineTuningWithStreamingResponse(self) class FineTuningWithRawResponse: def __init__(self, fine_tuning: FineTuning) -> None: self._fine_tuning = fine_tuning @cached_property def jobs(self) -> JobsWithRawResponse: return JobsWithRawResponse(self._fine_tuning.jobs) class AsyncFineTuningWithRawResponse: def __init__(self, fine_tuning: AsyncFineTuning) -> None: self._fine_tuning = fine_tuning @cached_property def jobs(self) -> AsyncJobsWithRawResponse: return AsyncJobsWithRawResponse(self._fine_tuning.jobs) class FineTuningWithStreamingResponse: def __init__(self, fine_tuning: FineTuning) -> None: self._fine_tuning = fine_tuning @cached_property def jobs(self) -> JobsWithStreamingResponse: return JobsWithStreamingResponse(self._fine_tuning.jobs) class AsyncFineTuningWithStreamingResponse: def __init__(self, fine_tuning: AsyncFineTuning) -> None: self._fine_tuning = fine_tuning @cached_property def jobs(self) -> AsyncJobsWithStreamingResponse: return AsyncJobsWithStreamingResponse(self._fine_tuning.jobs) openai-python-1.12.0/src/openai/resources/fine_tuning/jobs.py000066400000000000000000000576071456126711000242770ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Optional from typing_extensions import Literal import httpx from ... 
import _legacy_response from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven from ..._utils import maybe_transform from ..._compat import cached_property from ..._resource import SyncAPIResource, AsyncAPIResource from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ...pagination import SyncCursorPage, AsyncCursorPage from ..._base_client import ( AsyncPaginator, make_request_options, ) from ...types.fine_tuning import ( FineTuningJob, FineTuningJobEvent, job_list_params, job_create_params, job_list_events_params, ) __all__ = ["Jobs", "AsyncJobs"] class Jobs(SyncAPIResource): @cached_property def with_raw_response(self) -> JobsWithRawResponse: return JobsWithRawResponse(self) @cached_property def with_streaming_response(self) -> JobsWithStreamingResponse: return JobsWithStreamingResponse(self) def create( self, *, model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]], training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) Args: model: The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). training_file: The ID of an uploaded file that contains training data. See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. hyperparameters: The hyperparameters used for the fine-tuning job. suffix: A string of up to 18 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. validation_file: The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files. Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/fine_tuning/jobs", body=maybe_transform( { "model": model, "training_file": training_file, "hyperparameters": hyperparameters, "suffix": suffix, "validation_file": validation_file, }, job_create_params.JobCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FineTuningJob, ) def retrieve( self, fine_tuning_job_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Get info about a fine-tuning job. [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not fine_tuning_job_id: raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") return self._get( f"/fine_tuning/jobs/{fine_tuning_job_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FineTuningJob, ) def list( self, *, after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[FineTuningJob]: """ List your organization's fine-tuning jobs Args: after: Identifier for the last job from the previous pagination request. limit: Number of fine-tuning jobs to retrieve. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._get_api_list( "/fine_tuning/jobs", page=SyncCursorPage[FineTuningJob], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "limit": limit, }, job_list_params.JobListParams, ), ), model=FineTuningJob, ) def cancel( self, fine_tuning_job_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Immediately cancel a fine-tune job. 
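# Usage sketch (illustrative): starting a fine-tuning job with the synchronous
# client via the `create` method implemented above. The training file ID and
# suffix are placeholders; `hyperparameters` accepts the optional fields from
# `job_create_params.Hyperparameters`.
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     job = client.fine_tuning.jobs.create(
#         model="gpt-3.5-turbo",
#         training_file="file-abc123",      # placeholder ID of an uploaded .jsonl file
#         hyperparameters={"n_epochs": 3},  # optional
#         suffix="custom-model-name",
#     )
#     print(job.id, job.status)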
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not fine_tuning_job_id: raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") return self._post( f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FineTuningJob, ) def list_events( self, fine_tuning_job_id: str, *, after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncCursorPage[FineTuningJobEvent]: """ Get status updates for a fine-tuning job. Args: after: Identifier for the last event from the previous pagination request. limit: Number of events to retrieve. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not fine_tuning_job_id: raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") return self._get_api_list( f"/fine_tuning/jobs/{fine_tuning_job_id}/events", page=SyncCursorPage[FineTuningJobEvent], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "limit": limit, }, job_list_events_params.JobListEventsParams, ), ), model=FineTuningJobEvent, ) class AsyncJobs(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncJobsWithRawResponse: return AsyncJobsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncJobsWithStreamingResponse: return AsyncJobsWithStreamingResponse(self) async def create( self, *, model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]], training_file: str, hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN, suffix: Optional[str] | NotGiven = NOT_GIVEN, validation_file: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Creates a fine-tuning job which begins the process of creating a new model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) Args: model: The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). 
training_file: The ID of an uploaded file that contains training data. See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. hyperparameters: The hyperparameters used for the fine-tuning job. suffix: A string of up to 18 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. validation_file: The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. The same data should not be present in both train and validation files. Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( "/fine_tuning/jobs", body=maybe_transform( { "model": model, "training_file": training_file, "hyperparameters": hyperparameters, "suffix": suffix, "validation_file": validation_file, }, job_create_params.JobCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FineTuningJob, ) async def retrieve( self, fine_tuning_job_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Get info about a fine-tuning job. [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning) Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not fine_tuning_job_id: raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") return await self._get( f"/fine_tuning/jobs/{fine_tuning_job_id}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FineTuningJob, ) def list( self, *, after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
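# Usage sketch (illustrative): iterating over fine-tuning jobs with the async
# client. The `list` method defined here returns an `AsyncCursorPage`, so
# `async for` transparently fetches further pages as needed.
#
#     import asyncio
#     from openai import AsyncOpenAI
#
#     async def show_jobs() -> None:
#         client = AsyncOpenAI()
#         async for job in client.fine_tuning.jobs.list(limit=10):
#             print(job.id, job.status)
#
#     asyncio.run(show_jobs())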
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]: """ List your organization's fine-tuning jobs Args: after: Identifier for the last job from the previous pagination request. limit: Number of fine-tuning jobs to retrieve. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._get_api_list( "/fine_tuning/jobs", page=AsyncCursorPage[FineTuningJob], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "limit": limit, }, job_list_params.JobListParams, ), ), model=FineTuningJob, ) async def cancel( self, fine_tuning_job_id: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> FineTuningJob: """ Immediately cancel a fine-tune job. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not fine_tuning_job_id: raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") return await self._post( f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=FineTuningJob, ) def list_events( self, fine_tuning_job_id: str, *, after: str | NotGiven = NOT_GIVEN, limit: int | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]: """ Get status updates for a fine-tuning job. Args: after: Identifier for the last event from the previous pagination request. limit: Number of events to retrieve. 
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not fine_tuning_job_id: raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}") return self._get_api_list( f"/fine_tuning/jobs/{fine_tuning_job_id}/events", page=AsyncCursorPage[FineTuningJobEvent], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout, query=maybe_transform( { "after": after, "limit": limit, }, job_list_events_params.JobListEventsParams, ), ), model=FineTuningJobEvent, ) class JobsWithRawResponse: def __init__(self, jobs: Jobs) -> None: self._jobs = jobs self.create = _legacy_response.to_raw_response_wrapper( jobs.create, ) self.retrieve = _legacy_response.to_raw_response_wrapper( jobs.retrieve, ) self.list = _legacy_response.to_raw_response_wrapper( jobs.list, ) self.cancel = _legacy_response.to_raw_response_wrapper( jobs.cancel, ) self.list_events = _legacy_response.to_raw_response_wrapper( jobs.list_events, ) class AsyncJobsWithRawResponse: def __init__(self, jobs: AsyncJobs) -> None: self._jobs = jobs self.create = _legacy_response.async_to_raw_response_wrapper( jobs.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( jobs.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( jobs.list, ) self.cancel = _legacy_response.async_to_raw_response_wrapper( jobs.cancel, ) self.list_events = _legacy_response.async_to_raw_response_wrapper( jobs.list_events, ) class JobsWithStreamingResponse: def __init__(self, jobs: Jobs) -> None: self._jobs = jobs self.create = to_streamed_response_wrapper( jobs.create, ) self.retrieve = to_streamed_response_wrapper( jobs.retrieve, ) self.list = to_streamed_response_wrapper( jobs.list, ) self.cancel = to_streamed_response_wrapper( jobs.cancel, ) self.list_events = to_streamed_response_wrapper( jobs.list_events, ) class AsyncJobsWithStreamingResponse: def __init__(self, jobs: AsyncJobs) -> None: self._jobs = jobs self.create = async_to_streamed_response_wrapper( jobs.create, ) self.retrieve = async_to_streamed_response_wrapper( jobs.retrieve, ) self.list = async_to_streamed_response_wrapper( jobs.list, ) self.cancel = async_to_streamed_response_wrapper( jobs.cancel, ) self.list_events = async_to_streamed_response_wrapper( jobs.list_events, ) openai-python-1.12.0/src/openai/resources/images.py000066400000000000000000000571371456126711000223000ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Mapping, Optional, cast from typing_extensions import Literal import httpx from .. 
import _legacy_response from ..types import ( ImagesResponse, image_edit_params, image_generate_params, image_create_variation_params, ) from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes from .._utils import extract_files, maybe_transform, deepcopy_minimal from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import ( make_request_options, ) __all__ = ["Images", "AsyncImages"] class Images(SyncAPIResource): @cached_property def with_raw_response(self) -> ImagesWithRawResponse: return ImagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> ImagesWithStreamingResponse: return ImagesWithStreamingResponse(self) def create_variation( self, *, image: FileTypes, model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates a variation of a given image. Args: image: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. model: The model to use for image generation. Only `dall-e-2` is supported at this time. n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "image": image, "model": model, "n": n, "response_format": response_format, "size": size, "user": user, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/images/variations", body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, ) def edit( self, *, image: FileTypes, prompt: str, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an edited or extended image given an original image and a prompt. Args: image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. prompt: A text description of the desired image(s). The maximum length is 1000 characters. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. model: The model to use for image generation. Only `dall-e-2` is supported at this time. n: The number of images to generate. Must be between 1 and 10. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "image": image, "prompt": prompt, "mask": mask, "model": model, "n": n, "response_format": response_format, "size": size, "user": user, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return self._post( "/images/edits", body=maybe_transform(body, image_edit_params.ImageEditParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, ) def generate( self, *, prompt: str, model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an image given a prompt. Args: prompt: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. model: The model to use for image generation. n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
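          Example (a minimal sketch, assuming `client = OpenAI()` has been
          configured with an API key):

              response = client.images.generate(
                  model="dall-e-3",
                  prompt="A watercolor painting of a lighthouse at dusk",
                  size="1024x1024",
                  quality="hd",
              )
              print(response.data[0].url)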
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/images/generations", body=maybe_transform( { "prompt": prompt, "model": model, "n": n, "quality": quality, "response_format": response_format, "size": size, "style": style, "user": user, }, image_generate_params.ImageGenerateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, ) class AsyncImages(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncImagesWithRawResponse: return AsyncImagesWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncImagesWithStreamingResponse: return AsyncImagesWithStreamingResponse(self) async def create_variation( self, *, image: FileTypes, model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates a variation of a given image. Args: image: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. model: The model to use for image generation. Only `dall-e-2` is supported at this time. n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "image": image, "model": model, "n": n, "response_format": response_format, "size": size, "user": user, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/images/variations", body=maybe_transform(body, image_create_variation_params.ImageCreateVariationParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, ) async def edit( self, *, image: FileTypes, prompt: str, mask: FileTypes | NotGiven = NOT_GIVEN, model: Union[str, Literal["dall-e-2"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an edited or extended image given an original image and a prompt. Args: image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. prompt: A text description of the desired image(s). The maximum length is 1000 characters. mask: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. model: The model to use for image generation. Only `dall-e-2` is supported at this time. n: The number of images to generate. Must be between 1 and 10. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ body = deepcopy_minimal( { "image": image, "prompt": prompt, "mask": mask, "model": model, "n": n, "response_format": response_format, "size": size, "user": user, } ) files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]]) if files: # It should be noted that the actual Content-Type header that will be # sent to the server will contain a `boundary` parameter, e.g. 
# multipart/form-data; boundary=---abc-- extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})} return await self._post( "/images/edits", body=maybe_transform(body, image_edit_params.ImageEditParams), files=files, options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, ) async def generate( self, *, prompt: str, model: Union[str, Literal["dall-e-2", "dall-e-3"], None] | NotGiven = NOT_GIVEN, n: Optional[int] | NotGiven = NOT_GIVEN, quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN, response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN, size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN, style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN, user: str | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ImagesResponse: """ Creates an image given a prompt. Args: prompt: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. model: The model to use for image generation. n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. quality: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. response_format: The format in which the generated images are returned. Must be one of `url` or `b64_json`. size: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. user: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). 
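          Example (a sketch for the async client; `client` is assumed to be an
          `AsyncOpenAI()` instance):

              response = await client.images.generate(
                  model="dall-e-3",
                  prompt="A minimalist logo for a coffee shop",
                  response_format="b64_json",
              )
              print(len(response.data[0].b64_json or ""))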
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( "/images/generations", body=maybe_transform( { "prompt": prompt, "model": model, "n": n, "quality": quality, "response_format": response_format, "size": size, "style": style, "user": user, }, image_generate_params.ImageGenerateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ImagesResponse, ) class ImagesWithRawResponse: def __init__(self, images: Images) -> None: self._images = images self.create_variation = _legacy_response.to_raw_response_wrapper( images.create_variation, ) self.edit = _legacy_response.to_raw_response_wrapper( images.edit, ) self.generate = _legacy_response.to_raw_response_wrapper( images.generate, ) class AsyncImagesWithRawResponse: def __init__(self, images: AsyncImages) -> None: self._images = images self.create_variation = _legacy_response.async_to_raw_response_wrapper( images.create_variation, ) self.edit = _legacy_response.async_to_raw_response_wrapper( images.edit, ) self.generate = _legacy_response.async_to_raw_response_wrapper( images.generate, ) class ImagesWithStreamingResponse: def __init__(self, images: Images) -> None: self._images = images self.create_variation = to_streamed_response_wrapper( images.create_variation, ) self.edit = to_streamed_response_wrapper( images.edit, ) self.generate = to_streamed_response_wrapper( images.generate, ) class AsyncImagesWithStreamingResponse: def __init__(self, images: AsyncImages) -> None: self._images = images self.create_variation = async_to_streamed_response_wrapper( images.create_variation, ) self.edit = async_to_streamed_response_wrapper( images.edit, ) self.generate = async_to_streamed_response_wrapper( images.generate, ) openai-python-1.12.0/src/openai/resources/models.py000066400000000000000000000236461456126711000223140ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import httpx from .. import _legacy_response from ..types import Model, ModelDeleted from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from ..pagination import SyncPage, AsyncPage from .._base_client import ( AsyncPaginator, make_request_options, ) __all__ = ["Models", "AsyncModels"] class Models(SyncAPIResource): @cached_property def with_raw_response(self) -> ModelsWithRawResponse: return ModelsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModelsWithStreamingResponse: return ModelsWithStreamingResponse(self) def retrieve( self, model: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Model: """ Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
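        Example (a minimal sketch, assuming `client = OpenAI()` is configured with
        an API key):

            model = client.models.retrieve("gpt-3.5-turbo")
            print(model.id, model.owned_by)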
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not model: raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._get( f"/models/{model}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Model, ) def list( self, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> SyncPage[Model]: """ Lists the currently available models, and provides basic information about each one such as the owner and availability. """ return self._get_api_list( "/models", page=SyncPage[Model], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), model=Model, ) def delete( self, model: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelDeleted: """Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not model: raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return self._delete( f"/models/{model}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelDeleted, ) class AsyncModels(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModelsWithRawResponse: return AsyncModelsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncModelsWithStreamingResponse: return AsyncModelsWithStreamingResponse(self) async def retrieve( self, model: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> Model: """ Retrieves a model instance, providing basic information about the model such as the owner and permissioning. 
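        Example (the async variant; `client` is assumed to be an `AsyncOpenAI()`
        instance):

            model = await client.models.retrieve("gpt-3.5-turbo")
            print(model.id, model.created)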
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not model: raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._get( f"/models/{model}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=Model, ) def list( self, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> AsyncPaginator[Model, AsyncPage[Model]]: """ Lists the currently available models, and provides basic information about each one such as the owner and availability. """ return self._get_api_list( "/models", page=AsyncPage[Model], options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), model=Model, ) async def delete( self, model: str, *, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModelDeleted: """Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. 
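        Example (a minimal sketch; the model name is a placeholder for one of your
        own fine-tuned models):

            deleted = await client.models.delete("ft:gpt-3.5-turbo:acme::abc123")
            print(deleted.deleted)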
Args: extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ if not model: raise ValueError(f"Expected a non-empty value for `model` but received {model!r}") return await self._delete( f"/models/{model}", options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModelDeleted, ) class ModelsWithRawResponse: def __init__(self, models: Models) -> None: self._models = models self.retrieve = _legacy_response.to_raw_response_wrapper( models.retrieve, ) self.list = _legacy_response.to_raw_response_wrapper( models.list, ) self.delete = _legacy_response.to_raw_response_wrapper( models.delete, ) class AsyncModelsWithRawResponse: def __init__(self, models: AsyncModels) -> None: self._models = models self.retrieve = _legacy_response.async_to_raw_response_wrapper( models.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( models.list, ) self.delete = _legacy_response.async_to_raw_response_wrapper( models.delete, ) class ModelsWithStreamingResponse: def __init__(self, models: Models) -> None: self._models = models self.retrieve = to_streamed_response_wrapper( models.retrieve, ) self.list = to_streamed_response_wrapper( models.list, ) self.delete = to_streamed_response_wrapper( models.delete, ) class AsyncModelsWithStreamingResponse: def __init__(self, models: AsyncModels) -> None: self._models = models self.retrieve = async_to_streamed_response_wrapper( models.retrieve, ) self.list = async_to_streamed_response_wrapper( models.list, ) self.delete = async_to_streamed_response_wrapper( models.delete, ) openai-python-1.12.0/src/openai/resources/moderations.py000066400000000000000000000146601456126711000233510ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union from typing_extensions import Literal import httpx from .. import _legacy_response from ..types import ModerationCreateResponse, moderation_create_params from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven from .._utils import maybe_transform from .._compat import cached_property from .._resource import SyncAPIResource, AsyncAPIResource from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper from .._base_client import ( make_request_options, ) __all__ = ["Moderations", "AsyncModerations"] class Moderations(SyncAPIResource): @cached_property def with_raw_response(self) -> ModerationsWithRawResponse: return ModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> ModerationsWithStreamingResponse: return ModerationsWithStreamingResponse(self) def create( self, *, input: Union[str, List[str]], model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: """ Classifies if text violates OpenAI's Content Policy Args: input: The input text to classify model: Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return self._post( "/moderations", body=maybe_transform( { "input": input, "model": model, }, moderation_create_params.ModerationCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModerationCreateResponse, ) class AsyncModerations(AsyncAPIResource): @cached_property def with_raw_response(self) -> AsyncModerationsWithRawResponse: return AsyncModerationsWithRawResponse(self) @cached_property def with_streaming_response(self) -> AsyncModerationsWithStreamingResponse: return AsyncModerationsWithStreamingResponse(self) async def create( self, *, input: Union[str, List[str]], model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> ModerationCreateResponse: """ Classifies if text violates OpenAI's Content Policy Args: input: The input text to classify model: Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. 
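          Example (a minimal sketch; `client` is assumed to be an `AsyncOpenAI()`
          instance):

              result = await client.moderations.create(input="I want to hurt them.")
              print(result.results[0].flagged)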
extra_headers: Send extra headers extra_query: Add additional query parameters to the request extra_body: Add additional JSON properties to the request timeout: Override the client-level default timeout for this request, in seconds """ return await self._post( "/moderations", body=maybe_transform( { "input": input, "model": model, }, moderation_create_params.ModerationCreateParams, ), options=make_request_options( extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout ), cast_to=ModerationCreateResponse, ) class ModerationsWithRawResponse: def __init__(self, moderations: Moderations) -> None: self._moderations = moderations self.create = _legacy_response.to_raw_response_wrapper( moderations.create, ) class AsyncModerationsWithRawResponse: def __init__(self, moderations: AsyncModerations) -> None: self._moderations = moderations self.create = _legacy_response.async_to_raw_response_wrapper( moderations.create, ) class ModerationsWithStreamingResponse: def __init__(self, moderations: Moderations) -> None: self._moderations = moderations self.create = to_streamed_response_wrapper( moderations.create, ) class AsyncModerationsWithStreamingResponse: def __init__(self, moderations: AsyncModerations) -> None: self._moderations = moderations self.create = async_to_streamed_response_wrapper( moderations.create, ) openai-python-1.12.0/src/openai/types/000077500000000000000000000000001456126711000175765ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/__init__.py000066400000000000000000000031351456126711000217110ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .image import Image as Image from .model import Model as Model from .shared import FunctionDefinition as FunctionDefinition, FunctionParameters as FunctionParameters from .embedding import Embedding as Embedding from .completion import Completion as Completion from .moderation import Moderation as Moderation from .file_object import FileObject as FileObject from .file_content import FileContent as FileContent from .file_deleted import FileDeleted as FileDeleted from .model_deleted import ModelDeleted as ModelDeleted from .images_response import ImagesResponse as ImagesResponse from .completion_usage import CompletionUsage as CompletionUsage from .file_list_params import FileListParams as FileListParams from .completion_choice import CompletionChoice as CompletionChoice from .image_edit_params import ImageEditParams as ImageEditParams from .file_create_params import FileCreateParams as FileCreateParams from .image_generate_params import ImageGenerateParams as ImageGenerateParams from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .moderation_create_params import ModerationCreateParams as ModerationCreateParams from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse from .moderation_create_response import ModerationCreateResponse as ModerationCreateResponse from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams openai-python-1.12.0/src/openai/types/audio/000077500000000000000000000000001456126711000206775ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/audio/__init__.py000066400000000000000000000007151456126711000230130ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
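# The param and response types re-exported below back the `client.audio` resources.
# A minimal usage sketch (assuming `client = OpenAI()` and a local "speech.mp3"
# file; both are placeholders):
#
#     with open("speech.mp3", "rb") as audio_file:
#         transcript = client.audio.transcriptions.create(
#             model="whisper-1",
#             file=audio_file,
#         )
#     print(transcript.text)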
from __future__ import annotations from .translation import Translation as Translation from .transcription import Transcription as Transcription from .speech_create_params import SpeechCreateParams as SpeechCreateParams from .translation_create_params import TranslationCreateParams as TranslationCreateParams from .transcription_create_params import TranscriptionCreateParams as TranscriptionCreateParams openai-python-1.12.0/src/openai/types/audio/speech_create_params.py000066400000000000000000000022671456126711000254150ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union from typing_extensions import Literal, Required, TypedDict __all__ = ["SpeechCreateParams"] class SpeechCreateParams(TypedDict, total=False): input: Required[str] """The text to generate audio for. The maximum length is 4096 characters.""" model: Required[Union[str, Literal["tts-1", "tts-1-hd"]]] """ One of the available [TTS models](https://platform.openai.com/docs/models/tts): `tts-1` or `tts-1-hd` """ voice: Required[Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"]] """The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options). """ response_format: Literal["mp3", "opus", "aac", "flac"] """The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`.""" speed: float """The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default. """ openai-python-1.12.0/src/openai/types/audio/transcription.py000066400000000000000000000002441456126711000241500ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from ..._models import BaseModel __all__ = ["Transcription"] class Transcription(BaseModel): text: str openai-python-1.12.0/src/openai/types/audio/transcription_create_params.py000066400000000000000000000037421456126711000270440ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union from typing_extensions import Literal, Required, TypedDict from ..._types import FileTypes __all__ = ["TranscriptionCreateParams"] class TranscriptionCreateParams(TypedDict, total=False): file: Required[FileTypes] """ The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. """ model: Required[Union[str, Literal["whisper-1"]]] """ID of the model to use. Only `whisper-1` is currently available.""" language: str """The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. """ prompt: str """An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should match the audio language. """ response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] """ The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. """ temperature: float """The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. """ timestamp_granularities: List[Literal["word", "segment"]] """The timestamp granularities to populate for this transcription. Any of these options: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. """ openai-python-1.12.0/src/openai/types/audio/translation.py000066400000000000000000000002401456126711000236030ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from ..._models import BaseModel __all__ = ["Translation"] class Translation(BaseModel): text: str openai-python-1.12.0/src/openai/types/audio/translation_create_params.py000066400000000000000000000025741456126711000265050ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union from typing_extensions import Literal, Required, TypedDict from ..._types import FileTypes __all__ = ["TranslationCreateParams"] class TranslationCreateParams(TypedDict, total=False): file: Required[FileTypes] """ The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. """ model: Required[Union[str, Literal["whisper-1"]]] """ID of the model to use. Only `whisper-1` is currently available.""" prompt: str """An optional text to guide the model's style or continue a previous audio segment. The [prompt](https://platform.openai.com/docs/guides/speech-to-text/prompting) should be in English. """ response_format: str """ The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. """ temperature: float """The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. """ openai-python-1.12.0/src/openai/types/beta/000077500000000000000000000000001456126711000205115ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/beta/__init__.py000066400000000000000000000014271456126711000226260ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .thread import Thread as Thread from .assistant import Assistant as Assistant from .thread_deleted import ThreadDeleted as ThreadDeleted from .assistant_deleted import AssistantDeleted as AssistantDeleted from .thread_create_params import ThreadCreateParams as ThreadCreateParams from .thread_update_params import ThreadUpdateParams as ThreadUpdateParams from .assistant_list_params import AssistantListParams as AssistantListParams from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams from .thread_create_and_run_params import ThreadCreateAndRunParams as ThreadCreateAndRunParams openai-python-1.12.0/src/openai/types/beta/assistant.py000066400000000000000000000050021456126711000230710ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from typing import List, Union, Optional from typing_extensions import Literal from ..shared import FunctionDefinition from ..._models import BaseModel __all__ = ["Assistant", "Tool", "ToolCodeInterpreter", "ToolRetrieval", "ToolFunction"] class ToolCodeInterpreter(BaseModel): type: Literal["code_interpreter"] """The type of tool being defined: `code_interpreter`""" class ToolRetrieval(BaseModel): type: Literal["retrieval"] """The type of tool being defined: `retrieval`""" class ToolFunction(BaseModel): function: FunctionDefinition type: Literal["function"] """The type of tool being defined: `function`""" Tool = Union[ToolCodeInterpreter, ToolRetrieval, ToolFunction] class Assistant(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" created_at: int """The Unix timestamp (in seconds) for when the assistant was created.""" description: Optional[str] = None """The description of the assistant. The maximum length is 512 characters.""" file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. """ instructions: Optional[str] = None """The system instructions that the assistant uses. The maximum length is 32768 characters. """ metadata: Optional[object] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ model: str """ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. """ name: Optional[str] = None """The name of the assistant. The maximum length is 256 characters.""" object: Literal["assistant"] """The object type, which is always `assistant`.""" tools: List[Tool] """A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. """ openai-python-1.12.0/src/openai/types/beta/assistant_create_params.py000066400000000000000000000047711456126711000257730ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params __all__ = [ "AssistantCreateParams", "Tool", "ToolAssistantToolsCode", "ToolAssistantToolsRetrieval", "ToolAssistantToolsFunction", ] class AssistantCreateParams(TypedDict, total=False): model: Required[str] """ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. """ description: Optional[str] """The description of the assistant. The maximum length is 512 characters.""" file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. 
""" instructions: Optional[str] """The system instructions that the assistant uses. The maximum length is 32768 characters. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" tools: Iterable[Tool] """A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. """ class ToolAssistantToolsCode(TypedDict, total=False): type: Required[Literal["code_interpreter"]] """The type of tool being defined: `code_interpreter`""" class ToolAssistantToolsRetrieval(TypedDict, total=False): type: Required[Literal["retrieval"]] """The type of tool being defined: `retrieval`""" class ToolAssistantToolsFunction(TypedDict, total=False): function: Required[shared_params.FunctionDefinition] type: Required[Literal["function"]] """The type of tool being defined: `function`""" Tool = Union[ToolAssistantToolsCode, ToolAssistantToolsRetrieval, ToolAssistantToolsFunction] openai-python-1.12.0/src/openai/types/beta/assistant_deleted.py000066400000000000000000000004141456126711000245610ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ..._models import BaseModel __all__ = ["AssistantDeleted"] class AssistantDeleted(BaseModel): id: str deleted: bool object: Literal["assistant.deleted"] openai-python-1.12.0/src/openai/types/beta/assistant_list_params.py000066400000000000000000000022431456126711000254730ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, TypedDict __all__ = ["AssistantListParams"] class AssistantListParams(TypedDict, total=False): after: str """A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. """ before: str """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ limit: int """A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. """ order: Literal["asc", "desc"] """Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. """ openai-python-1.12.0/src/openai/types/beta/assistant_update_params.py000066400000000000000000000051561456126711000260100ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params __all__ = [ "AssistantUpdateParams", "Tool", "ToolAssistantToolsCode", "ToolAssistantToolsRetrieval", "ToolAssistantToolsFunction", ] class AssistantUpdateParams(TypedDict, total=False): description: Optional[str] """The description of the assistant. 
The maximum length is 512 characters.""" file_ids: List[str] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. """ instructions: Optional[str] """The system instructions that the assistant uses. The maximum length is 32768 characters. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ model: str """ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. """ name: Optional[str] """The name of the assistant. The maximum length is 256 characters.""" tools: Iterable[Tool] """A list of tool enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`. """ class ToolAssistantToolsCode(TypedDict, total=False): type: Required[Literal["code_interpreter"]] """The type of tool being defined: `code_interpreter`""" class ToolAssistantToolsRetrieval(TypedDict, total=False): type: Required[Literal["retrieval"]] """The type of tool being defined: `retrieval`""" class ToolAssistantToolsFunction(TypedDict, total=False): function: Required[shared_params.FunctionDefinition] type: Required[Literal["function"]] """The type of tool being defined: `function`""" Tool = Union[ToolAssistantToolsCode, ToolAssistantToolsRetrieval, ToolAssistantToolsFunction] openai-python-1.12.0/src/openai/types/beta/assistants/000077500000000000000000000000001456126711000227055ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/beta/assistants/__init__.py000066400000000000000000000005441456126711000250210ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .assistant_file import AssistantFile as AssistantFile from .file_list_params import FileListParams as FileListParams from .file_create_params import FileCreateParams as FileCreateParams from .file_delete_response import FileDeleteResponse as FileDeleteResponse openai-python-1.12.0/src/openai/types/beta/assistants/assistant_file.py000066400000000000000000000010521456126711000262650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ...._models import BaseModel __all__ = ["AssistantFile"] class AssistantFile(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" assistant_id: str """The assistant ID that the file is attached to.""" created_at: int """The Unix timestamp (in seconds) for when the assistant file was created.""" object: Literal["assistant.file"] """The object type, which is always `assistant.file`.""" openai-python-1.12.0/src/openai/types/beta/assistants/file_create_params.py000066400000000000000000000007441456126711000270710ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
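# A minimal sketch tying the assistant params above to this file-attachment param
# type (the model name, instructions, and file ID are placeholders;
# `client = OpenAI()` is assumed):
#
#     assistant = client.beta.assistants.create(
#         model="gpt-4-turbo-preview",
#         instructions="You are a data analyst. Use code when helpful.",
#         tools=[{"type": "code_interpreter"}],
#     )
#     client.beta.assistants.files.create(assistant.id, file_id="file-abc123")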
from __future__ import annotations from typing_extensions import Required, TypedDict __all__ = ["FileCreateParams"] class FileCreateParams(TypedDict, total=False): file_id: Required[str] """ A [File](https://platform.openai.com/docs/api-reference/files) ID (with `purpose="assistants"`) that the assistant should use. Useful for tools like `retrieval` and `code_interpreter` that can access files. """ openai-python-1.12.0/src/openai/types/beta/assistants/file_delete_response.py000066400000000000000000000004261456126711000274400ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ...._models import BaseModel __all__ = ["FileDeleteResponse"] class FileDeleteResponse(BaseModel): id: str deleted: bool object: Literal["assistant.file.deleted"] openai-python-1.12.0/src/openai/types/beta/assistants/file_list_params.py000066400000000000000000000022311456126711000265720ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, TypedDict __all__ = ["FileListParams"] class FileListParams(TypedDict, total=False): after: str """A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. """ before: str """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ limit: int """A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. """ order: Literal["asc", "desc"] """Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. """ openai-python-1.12.0/src/openai/types/beta/chat/000077500000000000000000000000001456126711000214305ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/beta/chat/__init__.py000066400000000000000000000001311456126711000235340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations openai-python-1.12.0/src/openai/types/beta/thread.py000066400000000000000000000014151456126711000223330ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Optional from typing_extensions import Literal from ..._models import BaseModel __all__ = ["Thread"] class Thread(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" created_at: int """The Unix timestamp (in seconds) for when the thread was created.""" metadata: Optional[object] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ object: Literal["thread"] """The object type, which is always `thread`.""" openai-python-1.12.0/src/openai/types/beta/thread_create_and_run_params.py000066400000000000000000000072341456126711000267340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
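# A minimal sketch of `client.beta.threads.create_and_run(...)`, the call these
# params describe (the assistant ID is a placeholder; `client = OpenAI()` is
# assumed):
#
#     run = client.beta.threads.create_and_run(
#         assistant_id="asst_abc123",
#         thread={
#             "messages": [
#                 {"role": "user", "content": "Summarize the attached notes."},
#             ],
#         },
#     )
#     print(run.status)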
from __future__ import annotations from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params __all__ = [ "ThreadCreateAndRunParams", "Thread", "ThreadMessage", "Tool", "ToolAssistantToolsCode", "ToolAssistantToolsRetrieval", "ToolAssistantToolsFunction", ] class ThreadCreateAndRunParams(TypedDict, total=False): assistant_id: Required[str] """ The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. """ instructions: Optional[str] """Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. """ model: Optional[str] """ The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. """ thread: Thread """If no thread is provided, an empty thread will be created.""" tools: Optional[Iterable[Tool]] """Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. """ class ThreadMessage(TypedDict, total=False): content: Required[str] """The content of the message.""" role: Required[Literal["user"]] """The role of the entity that is creating the message. Currently only `user` is supported. """ file_ids: List[str] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. """ class Thread(TypedDict, total=False): messages: Iterable[ThreadMessage] """ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long.
""" class ToolAssistantToolsCode(TypedDict, total=False): type: Required[Literal["code_interpreter"]] """The type of tool being defined: `code_interpreter`""" class ToolAssistantToolsRetrieval(TypedDict, total=False): type: Required[Literal["retrieval"]] """The type of tool being defined: `retrieval`""" class ToolAssistantToolsFunction(TypedDict, total=False): function: Required[shared_params.FunctionDefinition] type: Required[Literal["function"]] """The type of tool being defined: `function`""" Tool = Union[ToolAssistantToolsCode, ToolAssistantToolsRetrieval, ToolAssistantToolsFunction] openai-python-1.12.0/src/openai/types/beta/thread_create_params.py000066400000000000000000000031371456126711000252240ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Iterable, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ThreadCreateParams", "Message"] class ThreadCreateParams(TypedDict, total=False): messages: Iterable[Message] """ A list of [messages](https://platform.openai.com/docs/api-reference/messages) to start the thread with. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ class Message(TypedDict, total=False): content: Required[str] """The content of the message.""" role: Required[Literal["user"]] """The role of the entity that is creating the message. Currently only `user` is supported. """ file_ids: List[str] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ openai-python-1.12.0/src/openai/types/beta/thread_deleted.py000066400000000000000000000004031456126711000240150ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ..._models import BaseModel __all__ = ["ThreadDeleted"] class ThreadDeleted(BaseModel): id: str deleted: bool object: Literal["thread.deleted"] openai-python-1.12.0/src/openai/types/beta/thread_update_params.py000066400000000000000000000010521456126711000252350ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Optional from typing_extensions import TypedDict __all__ = ["ThreadUpdateParams"] class ThreadUpdateParams(TypedDict, total=False): metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. 
""" openai-python-1.12.0/src/openai/types/beta/threads/000077500000000000000000000000001456126711000221435ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/beta/threads/__init__.py000066400000000000000000000017211456126711000242550ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .run import Run as Run from .thread_message import ThreadMessage as ThreadMessage from .run_list_params import RunListParams as RunListParams from .run_create_params import RunCreateParams as RunCreateParams from .run_update_params import RunUpdateParams as RunUpdateParams from .message_list_params import MessageListParams as MessageListParams from .message_content_text import MessageContentText as MessageContentText from .message_create_params import MessageCreateParams as MessageCreateParams from .message_update_params import MessageUpdateParams as MessageUpdateParams from .message_content_image_file import MessageContentImageFile as MessageContentImageFile from .run_submit_tool_outputs_params import RunSubmitToolOutputsParams as RunSubmitToolOutputsParams from .required_action_function_tool_call import RequiredActionFunctionToolCall as RequiredActionFunctionToolCall openai-python-1.12.0/src/openai/types/beta/threads/message_content_image_file.py000066400000000000000000000007511456126711000300370ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ...._models import BaseModel __all__ = ["MessageContentImageFile", "ImageFile"] class ImageFile(BaseModel): file_id: str """ The [File](https://platform.openai.com/docs/api-reference/files) ID of the image in the message content. """ class MessageContentImageFile(BaseModel): image_file: ImageFile type: Literal["image_file"] """Always `image_file`.""" openai-python-1.12.0/src/openai/types/beta/threads/message_content_text.py000066400000000000000000000030471456126711000267430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from typing import List, Union from typing_extensions import Literal from ...._models import BaseModel __all__ = [ "MessageContentText", "Text", "TextAnnotation", "TextAnnotationFileCitation", "TextAnnotationFileCitationFileCitation", "TextAnnotationFilePath", "TextAnnotationFilePathFilePath", ] class TextAnnotationFileCitationFileCitation(BaseModel): file_id: str """The ID of the specific File the citation is from.""" quote: str """The specific quote in the file.""" class TextAnnotationFileCitation(BaseModel): end_index: int file_citation: TextAnnotationFileCitationFileCitation start_index: int text: str """The text in the message content that needs to be replaced.""" type: Literal["file_citation"] """Always `file_citation`.""" class TextAnnotationFilePathFilePath(BaseModel): file_id: str """The ID of the file that was generated.""" class TextAnnotationFilePath(BaseModel): end_index: int file_path: TextAnnotationFilePathFilePath start_index: int text: str """The text in the message content that needs to be replaced.""" type: Literal["file_path"] """Always `file_path`.""" TextAnnotation = Union[TextAnnotationFileCitation, TextAnnotationFilePath] class Text(BaseModel): annotations: List[TextAnnotation] value: str """The data that makes up the text.""" class MessageContentText(BaseModel): text: Text type: Literal["text"] """Always `text`.""" openai-python-1.12.0/src/openai/types/beta/threads/message_create_params.py000066400000000000000000000021141456126711000270250ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["MessageCreateParams"] class MessageCreateParams(TypedDict, total=False): content: Required[str] """The content of the message.""" role: Required[Literal["user"]] """The role of the entity that is creating the message. Currently only `user` is supported. """ file_ids: List[str] """ A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that the message should use. There can be a maximum of 10 files attached to a message. Useful for tools like `retrieval` and `code_interpreter` that can access and use files. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ openai-python-1.12.0/src/openai/types/beta/threads/message_list_params.py000066400000000000000000000022371456126711000265430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, TypedDict __all__ = ["MessageListParams"] class MessageListParams(TypedDict, total=False): after: str """A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. """ before: str """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ limit: int """A limit on the number of objects to be returned. 
Limit can range between 1 and 100, and the default is 20. """ order: Literal["asc", "desc"] """Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. """ openai-python-1.12.0/src/openai/types/beta/threads/message_update_params.py000066400000000000000000000011241456126711000270440ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Optional from typing_extensions import Required, TypedDict __all__ = ["MessageUpdateParams"] class MessageUpdateParams(TypedDict, total=False): thread_id: Required[str] metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ openai-python-1.12.0/src/openai/types/beta/threads/messages/000077500000000000000000000000001456126711000237525ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/beta/threads/messages/__init__.py000066400000000000000000000003161456126711000260630ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .message_file import MessageFile as MessageFile from .file_list_params import FileListParams as FileListParams openai-python-1.12.0/src/openai/types/beta/threads/messages/file_list_params.py000066400000000000000000000023011456126711000276350ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["FileListParams"] class FileListParams(TypedDict, total=False): thread_id: Required[str] after: str """A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. """ before: str """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ limit: int """A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. """ order: Literal["asc", "desc"] """Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. """ openai-python-1.12.0/src/openai/types/beta/threads/messages/message_file.py000066400000000000000000000012671456126711000267550ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ....._models import BaseModel __all__ = ["MessageFile"] class MessageFile(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" created_at: int """The Unix timestamp (in seconds) for when the message file was created.""" message_id: str """ The ID of the [message](https://platform.openai.com/docs/api-reference/messages) that the [File](https://platform.openai.com/docs/api-reference/files) is attached to. 
""" object: Literal["thread.message.file"] """The object type, which is always `thread.message.file`.""" openai-python-1.12.0/src/openai/types/beta/threads/required_action_function_tool_call.py000066400000000000000000000015271456126711000316340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ...._models import BaseModel __all__ = ["RequiredActionFunctionToolCall", "Function"] class Function(BaseModel): arguments: str """The arguments that the model expects you to pass to the function.""" name: str """The name of the function.""" class RequiredActionFunctionToolCall(BaseModel): id: str """The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) endpoint. """ function: Function """The function definition.""" type: Literal["function"] """The type of tool call the output is required for. For now, this is always `function`. """ openai-python-1.12.0/src/openai/types/beta/threads/run.py000066400000000000000000000115711456126711000233260ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Union, Optional from typing_extensions import Literal from ...shared import FunctionDefinition from ...._models import BaseModel from .required_action_function_tool_call import RequiredActionFunctionToolCall __all__ = [ "Run", "LastError", "RequiredAction", "RequiredActionSubmitToolOutputs", "Tool", "ToolAssistantToolsCode", "ToolAssistantToolsRetrieval", "ToolAssistantToolsFunction", "Usage", ] class LastError(BaseModel): code: Literal["server_error", "rate_limit_exceeded"] """One of `server_error` or `rate_limit_exceeded`.""" message: str """A human-readable description of the error.""" class RequiredActionSubmitToolOutputs(BaseModel): tool_calls: List[RequiredActionFunctionToolCall] """A list of the relevant tool calls.""" class RequiredAction(BaseModel): submit_tool_outputs: RequiredActionSubmitToolOutputs """Details on the tool outputs needed for this run to continue.""" type: Literal["submit_tool_outputs"] """For now, this is always `submit_tool_outputs`.""" class ToolAssistantToolsCode(BaseModel): type: Literal["code_interpreter"] """The type of tool being defined: `code_interpreter`""" class ToolAssistantToolsRetrieval(BaseModel): type: Literal["retrieval"] """The type of tool being defined: `retrieval`""" class ToolAssistantToolsFunction(BaseModel): function: FunctionDefinition type: Literal["function"] """The type of tool being defined: `function`""" Tool = Union[ToolAssistantToolsCode, ToolAssistantToolsRetrieval, ToolAssistantToolsFunction] class Usage(BaseModel): completion_tokens: int """Number of completion tokens used over the course of the run.""" prompt_tokens: int """Number of prompt tokens used over the course of the run.""" total_tokens: int """Total number of tokens used (prompt + completion).""" class Run(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" assistant_id: str """ The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for execution of this run. 
""" cancelled_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run was cancelled.""" completed_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run was completed.""" created_at: int """The Unix timestamp (in seconds) for when the run was created.""" expires_at: int """The Unix timestamp (in seconds) for when the run will expire.""" failed_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run failed.""" file_ids: List[str] """ The list of [File](https://platform.openai.com/docs/api-reference/files) IDs the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. """ instructions: str """ The instructions that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. """ last_error: Optional[LastError] = None """The last error associated with this run. Will be `null` if there are no errors.""" metadata: Optional[object] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ model: str """ The model that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. """ object: Literal["thread.run"] """The object type, which is always `thread.run`.""" required_action: Optional[RequiredAction] = None """Details on the action required to continue the run. Will be `null` if no action is required. """ started_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run was started.""" status: Literal[ "queued", "in_progress", "requires_action", "cancelling", "cancelled", "failed", "completed", "expired" ] """ The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. """ thread_id: str """ The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was executed on as a part of this run. """ tools: List[Tool] """ The list of tools that the [assistant](https://platform.openai.com/docs/api-reference/assistants) used for this run. """ usage: Optional[Usage] = None """Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). """ openai-python-1.12.0/src/openai/types/beta/threads/run_create_params.py000066400000000000000000000047301456126711000262130ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ....types import shared_params __all__ = [ "RunCreateParams", "Tool", "ToolAssistantToolsCode", "ToolAssistantToolsRetrieval", "ToolAssistantToolsFunction", ] class RunCreateParams(TypedDict, total=False): assistant_id: Required[str] """ The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to execute this run. """ additional_instructions: Optional[str] """Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. """ instructions: Optional[str] """ Overrides the [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant) of the assistant. 
This is useful for modifying the behavior on a per-run basis. """ metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ model: Optional[str] """ The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. """ tools: Optional[Iterable[Tool]] """Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. """ class ToolAssistantToolsCode(TypedDict, total=False): type: Required[Literal["code_interpreter"]] """The type of tool being defined: `code_interpreter`""" class ToolAssistantToolsRetrieval(TypedDict, total=False): type: Required[Literal["retrieval"]] """The type of tool being defined: `retrieval`""" class ToolAssistantToolsFunction(TypedDict, total=False): function: Required[shared_params.FunctionDefinition] type: Required[Literal["function"]] """The type of tool being defined: `function`""" Tool = Union[ToolAssistantToolsCode, ToolAssistantToolsRetrieval, ToolAssistantToolsFunction] openai-python-1.12.0/src/openai/types/beta/threads/run_list_params.py000066400000000000000000000022271456126711000257220ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, TypedDict __all__ = ["RunListParams"] class RunListParams(TypedDict, total=False): after: str """A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. """ before: str """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ limit: int """A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. """ order: Literal["asc", "desc"] """Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. """ openai-python-1.12.0/src/openai/types/beta/threads/run_submit_tool_outputs_params.py000066400000000000000000000013271456126711000311120ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Iterable from typing_extensions import Required, TypedDict __all__ = ["RunSubmitToolOutputsParams", "ToolOutput"] class RunSubmitToolOutputsParams(TypedDict, total=False): thread_id: Required[str] tool_outputs: Required[Iterable[ToolOutput]] """A list of tools for which the outputs are being submitted.""" class ToolOutput(TypedDict, total=False): output: str """The output of the tool call to be submitted to continue the run.""" tool_call_id: str """ The ID of the tool call in the `required_action` object within the run object the output is being submitted for. 
""" openai-python-1.12.0/src/openai/types/beta/threads/run_update_params.py000066400000000000000000000011141456126711000262230ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Optional from typing_extensions import Required, TypedDict __all__ = ["RunUpdateParams"] class RunUpdateParams(TypedDict, total=False): thread_id: Required[str] metadata: Optional[object] """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ openai-python-1.12.0/src/openai/types/beta/threads/runs/000077500000000000000000000000001456126711000231325ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/beta/threads/runs/__init__.py000066400000000000000000000010761456126711000252470ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .run_step import RunStep as RunStep from .code_tool_call import CodeToolCall as CodeToolCall from .step_list_params import StepListParams as StepListParams from .function_tool_call import FunctionToolCall as FunctionToolCall from .retrieval_tool_call import RetrievalToolCall as RetrievalToolCall from .tool_calls_step_details import ToolCallsStepDetails as ToolCallsStepDetails from .message_creation_step_details import MessageCreationStepDetails as MessageCreationStepDetails openai-python-1.12.0/src/openai/types/beta/threads/runs/code_tool_call.py000066400000000000000000000031401456126711000264440ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Union from typing_extensions import Literal from ....._models import BaseModel __all__ = [ "CodeToolCall", "CodeInterpreter", "CodeInterpreterOutput", "CodeInterpreterOutputLogs", "CodeInterpreterOutputImage", "CodeInterpreterOutputImageImage", ] class CodeInterpreterOutputLogs(BaseModel): logs: str """The text output from the Code Interpreter tool call.""" type: Literal["logs"] """Always `logs`.""" class CodeInterpreterOutputImageImage(BaseModel): file_id: str """ The [file](https://platform.openai.com/docs/api-reference/files) ID of the image. """ class CodeInterpreterOutputImage(BaseModel): image: CodeInterpreterOutputImageImage type: Literal["image"] """Always `image`.""" CodeInterpreterOutput = Union[CodeInterpreterOutputLogs, CodeInterpreterOutputImage] class CodeInterpreter(BaseModel): input: str """The input to the Code Interpreter tool call.""" outputs: List[CodeInterpreterOutput] """The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these are represented by a different object type. """ class CodeToolCall(BaseModel): id: str """The ID of the tool call.""" code_interpreter: CodeInterpreter """The Code Interpreter tool call definition.""" type: Literal["code_interpreter"] """The type of tool call. This is always going to be `code_interpreter` for this type of tool call. """ openai-python-1.12.0/src/openai/types/beta/threads/runs/function_tool_call.py000066400000000000000000000015671456126711000273720ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from typing import Optional from typing_extensions import Literal from ....._models import BaseModel __all__ = ["FunctionToolCall", "Function"] class Function(BaseModel): arguments: str """The arguments passed to the function.""" name: str """The name of the function.""" output: Optional[str] = None """The output of the function. This will be `null` if the outputs have not been [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs) yet. """ class FunctionToolCall(BaseModel): id: str """The ID of the tool call object.""" function: Function """The definition of the function that was called.""" type: Literal["function"] """The type of tool call. This is always going to be `function` for this type of tool call. """ openai-python-1.12.0/src/openai/types/beta/threads/runs/message_creation_step_details.py000066400000000000000000000007311456126711000315550ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ....._models import BaseModel __all__ = ["MessageCreationStepDetails", "MessageCreation"] class MessageCreation(BaseModel): message_id: str """The ID of the message that was created by this run step.""" class MessageCreationStepDetails(BaseModel): message_creation: MessageCreation type: Literal["message_creation"] """Always `message_creation`.""" openai-python-1.12.0/src/openai/types/beta/threads/runs/retrieval_tool_call.py000066400000000000000000000007411456126711000275330ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ....._models import BaseModel __all__ = ["RetrievalToolCall"] class RetrievalToolCall(BaseModel): id: str """The ID of the tool call object.""" retrieval: object """For now, this is always going to be an empty object.""" type: Literal["retrieval"] """The type of tool call. This is always going to be `retrieval` for this type of tool call. """ openai-python-1.12.0/src/openai/types/beta/threads/runs/run_step.py000066400000000000000000000063601456126711000253500ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Union, Optional from typing_extensions import Literal from ....._models import BaseModel from .tool_calls_step_details import ToolCallsStepDetails from .message_creation_step_details import MessageCreationStepDetails __all__ = ["RunStep", "LastError", "StepDetails", "Usage"] class LastError(BaseModel): code: Literal["server_error", "rate_limit_exceeded"] """One of `server_error` or `rate_limit_exceeded`.""" message: str """A human-readable description of the error.""" StepDetails = Union[MessageCreationStepDetails, ToolCallsStepDetails] class Usage(BaseModel): completion_tokens: int """Number of completion tokens used over the course of the run step.""" prompt_tokens: int """Number of prompt tokens used over the course of the run step.""" total_tokens: int """Total number of tokens used (prompt + completion).""" class RunStep(BaseModel): id: str """The identifier of the run step, which can be referenced in API endpoints.""" assistant_id: str """ The ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) associated with the run step. 
""" cancelled_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run step was cancelled.""" completed_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run step completed.""" created_at: int """The Unix timestamp (in seconds) for when the run step was created.""" expired_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. """ failed_at: Optional[int] = None """The Unix timestamp (in seconds) for when the run step failed.""" last_error: Optional[LastError] = None """The last error associated with this run step. Will be `null` if there are no errors. """ metadata: Optional[object] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ object: Literal["thread.run.step"] """The object type, which is always `thread.run.step`.""" run_id: str """ The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that this run step is a part of. """ status: Literal["in_progress", "cancelled", "failed", "completed", "expired"] """ The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. """ step_details: StepDetails """The details of the run step.""" thread_id: str """ The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) that was run. """ type: Literal["message_creation", "tool_calls"] """The type of run step, which can be either `message_creation` or `tool_calls`.""" usage: Optional[Usage] = None """Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. """ openai-python-1.12.0/src/openai/types/beta/threads/runs/step_list_params.py000066400000000000000000000023011456126711000270510ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["StepListParams"] class StepListParams(TypedDict, total=False): thread_id: Required[str] after: str """A cursor for use in pagination. `after` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. """ before: str """A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. """ limit: int """A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. """ order: Literal["asc", "desc"] """Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. """ openai-python-1.12.0/src/openai/types/beta/threads/runs/tool_calls_step_details.py000066400000000000000000000013401456126711000303750ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from typing import List, Union from typing_extensions import Literal from ....._models import BaseModel from .code_tool_call import CodeToolCall from .function_tool_call import FunctionToolCall from .retrieval_tool_call import RetrievalToolCall __all__ = ["ToolCallsStepDetails", "ToolCall"] ToolCall = Union[CodeToolCall, RetrievalToolCall, FunctionToolCall] class ToolCallsStepDetails(BaseModel): tool_calls: List[ToolCall] """An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `retrieval`, or `function`. """ type: Literal["tool_calls"] """Always `tool_calls`.""" openai-python-1.12.0/src/openai/types/beta/threads/thread_message.py000066400000000000000000000040111456126711000254640ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Union, Optional from typing_extensions import Literal from ...._models import BaseModel from .message_content_text import MessageContentText from .message_content_image_file import MessageContentImageFile __all__ = ["ThreadMessage", "Content"] Content = Union[MessageContentImageFile, MessageContentText] class ThreadMessage(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" assistant_id: Optional[str] = None """ If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-reference/assistants) that authored this message. """ content: List[Content] """The content of the message in array of text and/or images.""" created_at: int """The Unix timestamp (in seconds) for when the message was created.""" file_ids: List[str] """ A list of [file](https://platform.openai.com/docs/api-reference/files) IDs that the assistant should use. Useful for tools like retrieval and code_interpreter that can access files. A maximum of 10 files can be attached to a message. """ metadata: Optional[object] = None """Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. """ object: Literal["thread.message"] """The object type, which is always `thread.message`.""" role: Literal["user", "assistant"] """The entity that produced the message. One of `user` or `assistant`.""" run_id: Optional[str] = None """ If applicable, the ID of the [run](https://platform.openai.com/docs/api-reference/runs) associated with the authoring of this message. """ thread_id: str """ The [thread](https://platform.openai.com/docs/api-reference/threads) ID that this message belongs to. """ openai-python-1.12.0/src/openai/types/chat/000077500000000000000000000000001456126711000205155ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/chat/__init__.py000066400000000000000000000045771456126711000226430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations from .chat_completion import ChatCompletion as ChatCompletion from .chat_completion_role import ChatCompletionRole as ChatCompletionRole from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage from .completion_create_params import CompletionCreateParams as CompletionCreateParams from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam from .chat_completion_function_message_param import ( ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam, ) from .chat_completion_assistant_message_param import ( ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam, ) from .chat_completion_content_part_text_param import ( ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam, ) from .chat_completion_message_tool_call_param import ( ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam, ) from .chat_completion_named_tool_choice_param import ( ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam, ) from .chat_completion_content_part_image_param import ( ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam, ) from .chat_completion_tool_choice_option_param import ( ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam, ) from .chat_completion_function_call_option_param import ( ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam, ) openai-python-1.12.0/src/openai/types/chat/chat_completion.py000066400000000000000000000043601456126711000242420ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel from ..completion_usage import CompletionUsage from .chat_completion_message import ChatCompletionMessage from .chat_completion_token_logprob import ChatCompletionTokenLogprob __all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"] class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" class Choice(BaseModel): finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"] """The reason the model stopped generating tokens. 
This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. """ index: int """The index of the choice in the list of choices.""" logprobs: Optional[ChoiceLogprobs] = None """Log probability information for the choice.""" message: ChatCompletionMessage """A chat completion message generated by the model.""" class ChatCompletion(BaseModel): id: str """A unique identifier for the chat completion.""" choices: List[Choice] """A list of chat completion choices. Can be more than one if `n` is greater than 1. """ created: int """The Unix timestamp (in seconds) of when the chat completion was created.""" model: str """The model used for the chat completion.""" object: Literal["chat.completion"] """The object type, which is always `chat.completion`.""" system_fingerprint: Optional[str] = None """This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. """ usage: Optional[CompletionUsage] = None """Usage statistics for the completion request.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_assistant_message_param.py000066400000000000000000000031051456126711000312130ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Iterable, Optional from typing_extensions import Literal, Required, TypedDict from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam __all__ = ["ChatCompletionAssistantMessageParam", "FunctionCall"] class FunctionCall(TypedDict, total=False): arguments: Required[str] """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: Required[str] """The name of the function to call.""" class ChatCompletionAssistantMessageParam(TypedDict, total=False): role: Required[Literal["assistant"]] """The role of the messages author, in this case `assistant`.""" content: Optional[str] """The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. """ function_call: FunctionCall """Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. """ name: str """An optional name for the participant. Provides the model information to differentiate between participants of the same role. """ tool_calls: Iterable[ChatCompletionMessageToolCallParam] """The tool calls generated by the model, such as function calls.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_chunk.py000066400000000000000000000077361456126711000254440ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
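# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated files above or below): a
# minimal request whose response is parsed into the ChatCompletion and Choice
# models defined above. OPENAI_API_KEY is assumed to be set.
from openai import OpenAI

client = OpenAI()

completion = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Name three prime numbers."},
    ],
)

choice = completion.choices[0]
print(choice.finish_reason)    # e.g. "stop" or "length"
print(choice.message.content)  # assistant text; may be None when tools are called
if completion.usage is not None:
    print(completion.usage.total_tokens)
# ---------------------------------------------------------------------------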
from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel from .chat_completion_token_logprob import ChatCompletionTokenLogprob __all__ = [ "ChatCompletionChunk", "Choice", "ChoiceDelta", "ChoiceDeltaFunctionCall", "ChoiceDeltaToolCall", "ChoiceDeltaToolCallFunction", "ChoiceLogprobs", ] class ChoiceDeltaFunctionCall(BaseModel): arguments: Optional[str] = None """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: Optional[str] = None """The name of the function to call.""" class ChoiceDeltaToolCallFunction(BaseModel): arguments: Optional[str] = None """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: Optional[str] = None """The name of the function to call.""" class ChoiceDeltaToolCall(BaseModel): index: int id: Optional[str] = None """The ID of the tool call.""" function: Optional[ChoiceDeltaToolCallFunction] = None type: Optional[Literal["function"]] = None """The type of the tool. Currently, only `function` is supported.""" class ChoiceDelta(BaseModel): content: Optional[str] = None """The contents of the chunk message.""" function_call: Optional[ChoiceDeltaFunctionCall] = None """Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. """ role: Optional[Literal["system", "user", "assistant", "tool"]] = None """The role of the author of this message.""" tool_calls: Optional[List[ChoiceDeltaToolCall]] = None class ChoiceLogprobs(BaseModel): content: Optional[List[ChatCompletionTokenLogprob]] = None """A list of message content tokens with log probability information.""" class Choice(BaseModel): delta: ChoiceDelta """A chat completion delta generated by streamed model responses.""" finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. """ index: int """The index of the choice in the list of choices.""" logprobs: Optional[ChoiceLogprobs] = None """Log probability information for the choice.""" class ChatCompletionChunk(BaseModel): id: str """A unique identifier for the chat completion. Each chunk has the same ID.""" choices: List[Choice] """A list of chat completion choices. Can be more than one if `n` is greater than 1. """ created: int """The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. """ model: str """The model to generate the completion.""" object: Literal["chat.completion.chunk"] """The object type, which is always `chat.completion.chunk`.""" system_fingerprint: Optional[str] = None """ This fingerprint represents the backend configuration that the model runs with. 
Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. """ openai-python-1.12.0/src/openai/types/chat/chat_completion_content_part_image_param.py000066400000000000000000000013741456126711000313460ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionContentPartImageParam", "ImageURL"] class ImageURL(TypedDict, total=False): url: Required[str] """Either a URL of the image or the base64 encoded image data.""" detail: Literal["auto", "low", "high"] """Specifies the detail level of the image. Learn more in the [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding). """ class ChatCompletionContentPartImageParam(TypedDict, total=False): image_url: Required[ImageURL] type: Required[Literal["image_url"]] """The type of the content part.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_content_part_param.py000066400000000000000000000007051456126711000302010ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam __all__ = ["ChatCompletionContentPartParam"] ChatCompletionContentPartParam = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam] openai-python-1.12.0/src/openai/types/chat/chat_completion_content_part_text_param.py000066400000000000000000000006141456126711000312440ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionContentPartTextParam"] class ChatCompletionContentPartTextParam(TypedDict, total=False): text: Required[str] """The text content.""" type: Required[Literal["text"]] """The type of the content part.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_function_call_option_param.py000066400000000000000000000005141456126711000317070ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Required, TypedDict __all__ = ["ChatCompletionFunctionCallOptionParam"] class ChatCompletionFunctionCallOptionParam(TypedDict, total=False): name: Required[str] """The name of the function to call.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_function_message_param.py000066400000000000000000000010561456126711000310320ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionFunctionMessageParam"] class ChatCompletionFunctionMessageParam(TypedDict, total=False): content: Required[Optional[str]] """The contents of the function message.""" name: Required[str] """The name of the function to call.""" role: Required[Literal["function"]] """The role of the messages author, in this case `function`.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_message.py000066400000000000000000000023411456126711000257430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
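# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated files above or below):
# consuming the ChatCompletionChunk / ChoiceDelta models defined above when
# streaming. OPENAI_API_KEY is assumed to be set.
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write one sentence about the sea."}],
    stream=True,
)

collected = []
for chunk in stream:
    if not chunk.choices:
        continue
    delta = chunk.choices[0].delta
    if delta.content:  # content arrives incrementally, token by token
        collected.append(delta.content)

print("".join(collected))
# ---------------------------------------------------------------------------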
from typing import List, Optional from typing_extensions import Literal from ..._models import BaseModel from .chat_completion_message_tool_call import ChatCompletionMessageToolCall __all__ = ["ChatCompletionMessage", "FunctionCall"] class FunctionCall(BaseModel): arguments: str """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: str """The name of the function to call.""" class ChatCompletionMessage(BaseModel): content: Optional[str] = None """The contents of the message.""" role: Literal["assistant"] """The role of the author of this message.""" function_call: Optional[FunctionCall] = None """Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. """ tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None """The tool calls generated by the model, such as function calls.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_message_param.py000066400000000000000000000014451456126711000271270ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union from .chat_completion_tool_message_param import ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam from .chat_completion_system_message_param import ChatCompletionSystemMessageParam from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam __all__ = ["ChatCompletionMessageParam"] ChatCompletionMessageParam = Union[ ChatCompletionSystemMessageParam, ChatCompletionUserMessageParam, ChatCompletionAssistantMessageParam, ChatCompletionToolMessageParam, ChatCompletionFunctionMessageParam, ] openai-python-1.12.0/src/openai/types/chat/chat_completion_message_tool_call.py000066400000000000000000000015431456126711000277760ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ..._models import BaseModel __all__ = ["ChatCompletionMessageToolCall", "Function"] class Function(BaseModel): arguments: str """ The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: str """The name of the function to call.""" class ChatCompletionMessageToolCall(BaseModel): id: str """The ID of the tool call.""" function: Function """The function that the model called.""" type: Literal["function"] """The type of the tool. Currently, only `function` is supported.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_message_tool_call_param.py000066400000000000000000000017201456126711000311530ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionMessageToolCallParam", "Function"] class Function(TypedDict, total=False): arguments: Required[str] """ The arguments to call the function with, as generated by the model in JSON format. 
Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. """ name: Required[str] """The name of the function to call.""" class ChatCompletionMessageToolCallParam(TypedDict, total=False): id: Required[str] """The ID of the tool call.""" function: Required[Function] """The function that the model called.""" type: Required[Literal["function"]] """The type of the tool. Currently, only `function` is supported.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_named_tool_choice_param.py000066400000000000000000000010301456126711000311240ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionNamedToolChoiceParam", "Function"] class Function(TypedDict, total=False): name: Required[str] """The name of the function to call.""" class ChatCompletionNamedToolChoiceParam(TypedDict, total=False): function: Required[Function] type: Required[Literal["function"]] """The type of the tool. Currently, only `function` is supported.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_role.py000066400000000000000000000003171456126711000252610ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal __all__ = ["ChatCompletionRole"] ChatCompletionRole = Literal["system", "user", "assistant", "tool", "function"] openai-python-1.12.0/src/openai/types/chat/chat_completion_system_message_param.py000066400000000000000000000011351456126711000305270ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionSystemMessageParam"] class ChatCompletionSystemMessageParam(TypedDict, total=False): content: Required[str] """The contents of the system message.""" role: Required[Literal["system"]] """The role of the messages author, in this case `system`.""" name: str """An optional name for the participant. Provides the model information to differentiate between participants of the same role. """ openai-python-1.12.0/src/openai/types/chat/chat_completion_token_logprob.py000066400000000000000000000026401456126711000271650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Optional from ..._models import BaseModel __all__ = ["ChatCompletionTokenLogprob", "TopLogprob"] class TopLogprob(BaseModel): token: str """The token.""" bytes: Optional[List[int]] = None """A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. """ logprob: float """The log probability of this token.""" class ChatCompletionTokenLogprob(BaseModel): token: str """The token.""" bytes: Optional[List[int]] = None """A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. 
""" logprob: float """The log probability of this token.""" top_logprobs: List[TopLogprob] """List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. """ openai-python-1.12.0/src/openai/types/chat/chat_completion_tool_choice_option_param.py000066400000000000000000000006171456126711000313620ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union from typing_extensions import Literal from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam __all__ = ["ChatCompletionToolChoiceOptionParam"] ChatCompletionToolChoiceOptionParam = Union[Literal["none", "auto"], ChatCompletionNamedToolChoiceParam] openai-python-1.12.0/src/openai/types/chat/chat_completion_tool_message_param.py000066400000000000000000000010101456126711000301500ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict __all__ = ["ChatCompletionToolMessageParam"] class ChatCompletionToolMessageParam(TypedDict, total=False): content: Required[str] """The contents of the tool message.""" role: Required[Literal["tool"]] """The role of the messages author, in this case `tool`.""" tool_call_id: Required[str] """Tool call that this message is responding to.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_tool_param.py000066400000000000000000000007041456126711000264550ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict from ...types import shared_params __all__ = ["ChatCompletionToolParam"] class ChatCompletionToolParam(TypedDict, total=False): function: Required[shared_params.FunctionDefinition] type: Required[Literal["function"]] """The type of the tool. Currently, only `function` is supported.""" openai-python-1.12.0/src/openai/types/chat/chat_completion_user_message_param.py000066400000000000000000000013671456126711000301700ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Iterable from typing_extensions import Literal, Required, TypedDict from .chat_completion_content_part_param import ChatCompletionContentPartParam __all__ = ["ChatCompletionUserMessageParam"] class ChatCompletionUserMessageParam(TypedDict, total=False): content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]] """The contents of the user message.""" role: Required[Literal["user"]] """The role of the messages author, in this case `user`.""" name: str """An optional name for the participant. Provides the model information to differentiate between participants of the same role. """ openai-python-1.12.0/src/openai/types/chat/completion_create_params.py000066400000000000000000000245471456126711000261420ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict from ...types import shared_params from .chat_completion_tool_param import ChatCompletionToolParam from .chat_completion_message_param import ChatCompletionMessageParam from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam __all__ = [ "CompletionCreateParamsBase", "FunctionCall", "Function", "ResponseFormat", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming", ] class CompletionCreateParamsBase(TypedDict, total=False): messages: Required[Iterable[ChatCompletionMessageParam]] """A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). """ model: Required[ Union[ str, Literal[ "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0125", "gpt-3.5-turbo-16k-0613", ], ] ] """ID of the model to use. See the [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. """ frequency_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) """ function_call: FunctionCall """Deprecated in favor of `tool_choice`. Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. """ functions: Iterable[Function] """Deprecated in favor of `tools`. A list of functions the model may generate JSON inputs for. """ logit_bias: Optional[Dict[str, int]] """Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. """ logprobs: Optional[bool] """Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. """ max_tokens: Optional[int] """ The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. """ n: Optional[int] """How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. """ presence_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) """ response_format: ResponseFormat """An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. """ seed: Optional[int] """ This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. """ stop: Union[Optional[str], List[str]] """Up to 4 sequences where the API will stop generating further tokens.""" temperature: Optional[float] """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. """ tool_choice: ChatCompletionToolChoiceOptionParam """ Controls which (if any) function is called by the model. `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. Specifying a particular function via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that function. `none` is the default when no functions are present. `auto` is the default if functions are present. """ tools: Iterable[ChatCompletionToolParam] """A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. """ top_logprobs: Optional[int] """ An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. """ top_p: Optional[float] """ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. """ user: str """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). """ FunctionCall = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam] class Function(TypedDict, total=False): name: Required[str] """The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. """ description: str """ A description of what the function does, used by the model to choose when and how to call the function. """ parameters: shared_params.FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. Omitting `parameters` defines a function with an empty parameter list. """ class ResponseFormat(TypedDict, total=False): type: Literal["text", "json_object"] """Must be one of `text` or `json_object`.""" class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): stream: Optional[Literal[False]] """If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). """ class CompletionCreateParamsStreaming(CompletionCreateParamsBase): stream: Required[Literal[True]] """If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). """ CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming] openai-python-1.12.0/src/openai/types/completion.py000066400000000000000000000021631456126711000223230ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Optional from typing_extensions import Literal from .._models import BaseModel from .completion_usage import CompletionUsage from .completion_choice import CompletionChoice __all__ = ["Completion"] class Completion(BaseModel): id: str """A unique identifier for the completion.""" choices: List[CompletionChoice] """The list of completion choices the model generated for the input prompt.""" created: int """The Unix timestamp (in seconds) of when the completion was created.""" model: str """The model used for completion.""" object: Literal["text_completion"] """The object type, which is always "text_completion" """ system_fingerprint: Optional[str] = None """This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
""" usage: Optional[CompletionUsage] = None """Usage statistics for the completion request.""" openai-python-1.12.0/src/openai/types/completion_choice.py000066400000000000000000000016441456126711000236400ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Dict, List, Optional from typing_extensions import Literal from .._models import BaseModel __all__ = ["CompletionChoice", "Logprobs"] class Logprobs(BaseModel): text_offset: Optional[List[int]] = None token_logprobs: Optional[List[float]] = None tokens: Optional[List[str]] = None top_logprobs: Optional[List[Dict[str, float]]] = None class CompletionChoice(BaseModel): finish_reason: Literal["stop", "length", "content_filter"] """The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, or `content_filter` if content was omitted due to a flag from our content filters. """ index: int logprobs: Optional[Logprobs] = None text: str openai-python-1.12.0/src/openai/types/completion_create_params.py000066400000000000000000000160671456126711000252210ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"] class CompletionCreateParamsBase(TypedDict, total=False): model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]] """ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. """ prompt: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None]] """ The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document. """ best_of: Optional[int] """ Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. """ echo: Optional[bool] """Echo back the prompt in addition to the completion""" frequency_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) """ logit_bias: Optional[Dict[str, int]] """Modify the likelihood of specified tokens appearing in the completion. 
Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated. """ logprobs: Optional[int] """ Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. """ max_tokens: Optional[int] """ The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. """ n: Optional[int] """How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. """ presence_penalty: Optional[float] """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details) """ seed: Optional[int] """ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. """ stop: Union[Optional[str], List[str], None] """Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. """ suffix: Optional[str] """The suffix that comes after a completion of inserted text.""" temperature: Optional[float] """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. """ top_p: Optional[float] """ An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. """ user: str """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). """ class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase): stream: Optional[Literal[False]] """Whether to stream back partial progress. 
If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). """ class CompletionCreateParamsStreaming(CompletionCreateParamsBase): stream: Required[Literal[True]] """Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). """ CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming] openai-python-1.12.0/src/openai/types/completion_usage.py000066400000000000000000000006211456126711000235040ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .._models import BaseModel __all__ = ["CompletionUsage"] class CompletionUsage(BaseModel): completion_tokens: int """Number of tokens in the generated completion.""" prompt_tokens: int """Number of tokens in the prompt.""" total_tokens: int """Total number of tokens used in the request (prompt + completion).""" openai-python-1.12.0/src/openai/types/create_embedding_response.py000066400000000000000000000013751456126711000253350ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List from typing_extensions import Literal from .._models import BaseModel from .embedding import Embedding __all__ = ["CreateEmbeddingResponse", "Usage"] class Usage(BaseModel): prompt_tokens: int """The number of tokens used by the prompt.""" total_tokens: int """The total number of tokens used by the request.""" class CreateEmbeddingResponse(BaseModel): data: List[Embedding] """The list of embeddings generated by the model.""" model: str """The name of the model used to generate the embedding.""" object: Literal["list"] """The object type, which is always "list".""" usage: Usage """The usage information for the request.""" openai-python-1.12.0/src/openai/types/embedding.py000066400000000000000000000011341456126711000220650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List from typing_extensions import Literal from .._models import BaseModel __all__ = ["Embedding"] class Embedding(BaseModel): embedding: List[float] """The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](https://platform.openai.com/docs/guides/embeddings). """ index: int """The index of the embedding in the list of embeddings.""" object: Literal["embedding"] """The object type, which is always "embedding".""" openai-python-1.12.0/src/openai/types/embedding_create_params.py000066400000000000000000000034741456126711000247640ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union, Iterable from typing_extensions import Literal, Required, TypedDict __all__ = ["EmbeddingCreateParams"] class EmbeddingCreateParams(TypedDict, total=False): input: Required[Union[str, List[str], Iterable[int], Iterable[Iterable[int]]]] """Input text to embed, encoded as a string or array of tokens. 
To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. """ model: Required[Union[str, Literal["text-embedding-ada-002", "text-embedding-3-small", "text-embedding-3-large"]]] """ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them. """ dimensions: int """The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. """ encoding_format: Literal["float", "base64"] """The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/). """ user: str """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). """ openai-python-1.12.0/src/openai/types/file_content.py000066400000000000000000000001441456126711000226200ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. __all__ = ["FileContent"] FileContent = str openai-python-1.12.0/src/openai/types/file_create_params.py000066400000000000000000000015101456126711000237520ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes __all__ = ["FileCreateParams"] class FileCreateParams(TypedDict, total=False): file: Required[FileTypes] """The File object (not file name) to be uploaded.""" purpose: Required[Literal["fine-tune", "assistants"]] """The intended purpose of the uploaded file. Use "fine-tune" for [Fine-tuning](https://platform.openai.com/docs/api-reference/fine-tuning) and "assistants" for [Assistants](https://platform.openai.com/docs/api-reference/assistants) and [Messages](https://platform.openai.com/docs/api-reference/messages). This allows us to validate the format of the uploaded file is correct for fine-tuning. """ openai-python-1.12.0/src/openai/types/file_deleted.py000066400000000000000000000003641456126711000225600ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from .._models import BaseModel __all__ = ["FileDeleted"] class FileDeleted(BaseModel): id: str deleted: bool object: Literal["file"] openai-python-1.12.0/src/openai/types/file_list_params.py000066400000000000000000000004251456126711000234660ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import TypedDict __all__ = ["FileListParams"] class FileListParams(TypedDict, total=False): purpose: str """Only return files with the given purpose.""" openai-python-1.12.0/src/openai/types/file_object.py000066400000000000000000000022511456126711000224150ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
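# Added usage sketch (illustrative; not generated by Stainless). `FileCreateParams` and
# `FileListParams` above map onto `client.files.create(...)` and `client.files.list(...)`,
# which return the `FileObject` defined below; the file path is a placeholder assumption.
"""Minimal sketch of uploading and listing files with these params::

    from openai import OpenAI

    client = OpenAI()
    uploaded = client.files.create(
        file=open("training_data.jsonl", "rb"),  # placeholder path
        purpose="fine-tune",
    )
    print(uploaded.id, uploaded.status)

    # Iterate the paginated listing of files with the same purpose.
    for f in client.files.list(purpose="fine-tune"):
        print(f.id, f.filename, f.bytes)
"""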
from typing import Optional from typing_extensions import Literal from .._models import BaseModel __all__ = ["FileObject"] class FileObject(BaseModel): id: str """The file identifier, which can be referenced in the API endpoints.""" bytes: int """The size of the file, in bytes.""" created_at: int """The Unix timestamp (in seconds) for when the file was created.""" filename: str """The name of the file.""" object: Literal["file"] """The object type, which is always `file`.""" purpose: Literal["fine-tune", "fine-tune-results", "assistants", "assistants_output"] """The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, `assistants`, and `assistants_output`. """ status: Literal["uploaded", "processed", "error"] """Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. """ status_details: Optional[str] = None """Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. """ openai-python-1.12.0/src/openai/types/fine_tuning/000077500000000000000000000000001456126711000221035ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/fine_tuning/__init__.py000066400000000000000000000006571456126711000242240ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from .fine_tuning_job import FineTuningJob as FineTuningJob from .job_list_params import JobListParams as JobListParams from .job_create_params import JobCreateParams as JobCreateParams from .fine_tuning_job_event import FineTuningJobEvent as FineTuningJobEvent from .job_list_events_params import JobListEventsParams as JobListEventsParams openai-python-1.12.0/src/openai/types/fine_tuning/fine_tuning_job.py000066400000000000000000000063601456126711000256210ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List, Union, Optional from typing_extensions import Literal from ..._models import BaseModel __all__ = ["FineTuningJob", "Error", "Hyperparameters"] class Error(BaseModel): code: str """A machine-readable error code.""" message: str """A human-readable error message.""" param: Optional[str] = None """The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. """ class Hyperparameters(BaseModel): n_epochs: Union[Literal["auto"], int] """The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. """ class FineTuningJob(BaseModel): id: str """The object identifier, which can be referenced in the API endpoints.""" created_at: int """The Unix timestamp (in seconds) for when the fine-tuning job was created.""" error: Optional[Error] = None """ For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. """ fine_tuned_model: Optional[str] = None """The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. """ finished_at: Optional[int] = None """The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. """ hyperparameters: Hyperparameters """The hyperparameters used for the fine-tuning job. 
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. """ model: str """The base model that is being fine-tuned.""" object: Literal["fine_tuning.job"] """The object type, which is always "fine_tuning.job".""" organization_id: str """The organization that owns the fine-tuning job.""" result_files: List[str] """The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). """ status: Literal["validating_files", "queued", "running", "succeeded", "failed", "cancelled"] """ The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. """ trained_tokens: Optional[int] = None """The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. """ training_file: str """The file ID used for training. You can retrieve the training data with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). """ validation_file: Optional[str] = None """The file ID used for validation. You can retrieve the validation results with the [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents). """ openai-python-1.12.0/src/openai/types/fine_tuning/fine_tuning_job_event.py000066400000000000000000000005251456126711000270170ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from ..._models import BaseModel __all__ = ["FineTuningJobEvent"] class FineTuningJobEvent(BaseModel): id: str created_at: int level: Literal["info", "warn", "error"] message: str object: Literal["fine_tuning.job.event"] openai-python-1.12.0/src/openai/types/fine_tuning/job_create_params.py000066400000000000000000000050221456126711000261140ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["JobCreateParams", "Hyperparameters"] class JobCreateParams(TypedDict, total=False): model: Required[Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]]] """The name of the model to fine-tune. You can select one of the [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned). """ training_file: Required[str] """The ID of an uploaded file that contains training data. See [upload file](https://platform.openai.com/docs/api-reference/files/upload) for how to upload a file. Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. """ hyperparameters: Hyperparameters """The hyperparameters used for the fine-tuning job.""" suffix: Optional[str] """ A string of up to 18 characters that will be added to your fine-tuned model name. For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. """ validation_file: Optional[str] """The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. 
The same data should not be present in both train and validation files. Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning) for more details. """ class Hyperparameters(TypedDict, total=False): batch_size: Union[Literal["auto"], int] """Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance. """ learning_rate_multiplier: Union[Literal["auto"], float] """Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting. """ n_epochs: Union[Literal["auto"], int] """The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. """ openai-python-1.12.0/src/openai/types/fine_tuning/job_list_events_params.py000066400000000000000000000005571456126711000272200ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import TypedDict __all__ = ["JobListEventsParams"] class JobListEventsParams(TypedDict, total=False): after: str """Identifier for the last event from the previous pagination request.""" limit: int """Number of events to retrieve.""" openai-python-1.12.0/src/openai/types/fine_tuning/job_list_params.py000066400000000000000000000005531456126711000256300ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import TypedDict __all__ = ["JobListParams"] class JobListParams(TypedDict, total=False): after: str """Identifier for the last job from the previous pagination request.""" limit: int """Number of fine-tuning jobs to retrieve.""" openai-python-1.12.0/src/openai/types/image.py000066400000000000000000000010761456126711000212360ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Optional from .._models import BaseModel __all__ = ["Image"] class Image(BaseModel): b64_json: Optional[str] = None """ The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. """ revised_prompt: Optional[str] = None """ The prompt that was used to generate the image, if there was any revision to the prompt. """ url: Optional[str] = None """The URL of the generated image, if `response_format` is `url` (default).""" openai-python-1.12.0/src/openai/types/image_create_variation_params.py000066400000000000000000000024761456126711000262050ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes __all__ = ["ImageCreateVariationParams"] class ImageCreateVariationParams(TypedDict, total=False): image: Required[FileTypes] """The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. """ model: Union[str, Literal["dall-e-2"], None] """The model to use for image generation. Only `dall-e-2` is supported at this time. """ n: Optional[int] """The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. """ response_format: Optional[Literal["url", "b64_json"]] """The format in which the generated images are returned. Must be one of `url` or `b64_json`. 
""" size: Optional[Literal["256x256", "512x512", "1024x1024"]] """The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. """ user: str """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). """ openai-python-1.12.0/src/openai/types/image_edit_params.py000066400000000000000000000032461456126711000236070ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict from .._types import FileTypes __all__ = ["ImageEditParams"] class ImageEditParams(TypedDict, total=False): image: Required[FileTypes] """The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. """ prompt: Required[str] """A text description of the desired image(s). The maximum length is 1000 characters. """ mask: FileTypes """An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. """ model: Union[str, Literal["dall-e-2"], None] """The model to use for image generation. Only `dall-e-2` is supported at this time. """ n: Optional[int] """The number of images to generate. Must be between 1 and 10.""" response_format: Optional[Literal["url", "b64_json"]] """The format in which the generated images are returned. Must be one of `url` or `b64_json`. """ size: Optional[Literal["256x256", "512x512", "1024x1024"]] """The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. """ user: str """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). """ openai-python-1.12.0/src/openai/types/image_generate_params.py000066400000000000000000000037301456126711000244520ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Union, Optional from typing_extensions import Literal, Required, TypedDict __all__ = ["ImageGenerateParams"] class ImageGenerateParams(TypedDict, total=False): prompt: Required[str] """A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. """ model: Union[str, Literal["dall-e-2", "dall-e-3"], None] """The model to use for image generation.""" n: Optional[int] """The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. """ quality: Literal["standard", "hd"] """The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. """ response_format: Optional[Literal["url", "b64_json"]] """The format in which the generated images are returned. Must be one of `url` or `b64_json`. """ size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] """The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. 
""" style: Optional[Literal["vivid", "natural"]] """The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. """ user: str """ A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids). """ openai-python-1.12.0/src/openai/types/images_response.py000066400000000000000000000003611456126711000233330ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import List from .image import Image from .._models import BaseModel __all__ = ["ImagesResponse"] class ImagesResponse(BaseModel): created: int data: List[Image] openai-python-1.12.0/src/openai/types/model.py000066400000000000000000000007631456126711000212560ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing_extensions import Literal from .._models import BaseModel __all__ = ["Model"] class Model(BaseModel): id: str """The model identifier, which can be referenced in the API endpoints.""" created: int """The Unix timestamp (in seconds) when the model was created.""" object: Literal["model"] """The object type, which is always "model".""" owned_by: str """The organization that owns the model.""" openai-python-1.12.0/src/openai/types/model_deleted.py000066400000000000000000000003031456126711000227320ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .._models import BaseModel __all__ = ["ModelDeleted"] class ModelDeleted(BaseModel): id: str deleted: bool object: str openai-python-1.12.0/src/openai/types/moderation.py000066400000000000000000000075521456126711000223220ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from pydantic import Field as FieldInfo from .._models import BaseModel __all__ = ["Moderation", "Categories", "CategoryScores"] class Categories(BaseModel): harassment: bool """ Content that expresses, incites, or promotes harassing language towards any target. """ harassment_threatening: bool = FieldInfo(alias="harassment/threatening") """ Harassment content that also includes violence or serious harm towards any target. """ hate: bool """ Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. """ hate_threatening: bool = FieldInfo(alias="hate/threatening") """ Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. """ self_harm: bool = FieldInfo(alias="self-harm") """ Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. """ self_harm_instructions: bool = FieldInfo(alias="self-harm/instructions") """ Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. 
""" self_harm_intent: bool = FieldInfo(alias="self-harm/intent") """ Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. """ sexual: bool """ Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). """ sexual_minors: bool = FieldInfo(alias="sexual/minors") """Sexual content that includes an individual who is under 18 years old.""" violence: bool """Content that depicts death, violence, or physical injury.""" violence_graphic: bool = FieldInfo(alias="violence/graphic") """Content that depicts death, violence, or physical injury in graphic detail.""" class CategoryScores(BaseModel): harassment: float """The score for the category 'harassment'.""" harassment_threatening: float = FieldInfo(alias="harassment/threatening") """The score for the category 'harassment/threatening'.""" hate: float """The score for the category 'hate'.""" hate_threatening: float = FieldInfo(alias="hate/threatening") """The score for the category 'hate/threatening'.""" self_harm: float = FieldInfo(alias="self-harm") """The score for the category 'self-harm'.""" self_harm_instructions: float = FieldInfo(alias="self-harm/instructions") """The score for the category 'self-harm/instructions'.""" self_harm_intent: float = FieldInfo(alias="self-harm/intent") """The score for the category 'self-harm/intent'.""" sexual: float """The score for the category 'sexual'.""" sexual_minors: float = FieldInfo(alias="sexual/minors") """The score for the category 'sexual/minors'.""" violence: float """The score for the category 'violence'.""" violence_graphic: float = FieldInfo(alias="violence/graphic") """The score for the category 'violence/graphic'.""" class Moderation(BaseModel): categories: Categories """A list of the categories, and whether they are flagged or not.""" category_scores: CategoryScores """A list of the categories along with their scores as predicted by model.""" flagged: bool """ Whether the content violates [OpenAI's usage policies](/policies/usage-policies). """ openai-python-1.12.0/src/openai/types/moderation_create_params.py000066400000000000000000000016311456126711000252000ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import List, Union from typing_extensions import Literal, Required, TypedDict __all__ = ["ModerationCreateParams"] class ModerationCreateParams(TypedDict, total=False): input: Required[Union[str, List[str]]] """The input text to classify""" model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] """ Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. """ openai-python-1.12.0/src/openai/types/moderation_create_response.py000066400000000000000000000007031456126711000255520ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from typing import List from .._models import BaseModel from .moderation import Moderation __all__ = ["ModerationCreateResponse"] class ModerationCreateResponse(BaseModel): id: str """The unique identifier for the moderation request.""" model: str """The model used to generate the moderation results.""" results: List[Moderation] """A list of moderation objects.""" openai-python-1.12.0/src/openai/types/shared/000077500000000000000000000000001456126711000210445ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/shared/__init__.py000066400000000000000000000003121456126711000231510ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters openai-python-1.12.0/src/openai/types/shared/function_definition.py000066400000000000000000000020121456126711000254460ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Optional from ..._models import BaseModel from .function_parameters import FunctionParameters __all__ = ["FunctionDefinition"] class FunctionDefinition(BaseModel): name: str """The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. """ description: Optional[str] = None """ A description of what the function does, used by the model to choose when and how to call the function. """ parameters: Optional[FunctionParameters] = None """The parameters the functions accepts, described as a JSON Schema object. See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. Omitting `parameters` defines a function with an empty parameter list. """ openai-python-1.12.0/src/openai/types/shared/function_parameters.py000066400000000000000000000002301456126711000254610ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from typing import Dict __all__ = ["FunctionParameters"] FunctionParameters = Dict[str, object] openai-python-1.12.0/src/openai/types/shared_params/000077500000000000000000000000001456126711000224075ustar00rootroot00000000000000openai-python-1.12.0/src/openai/types/shared_params/__init__.py000066400000000000000000000003121456126711000245140ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from .function_definition import FunctionDefinition as FunctionDefinition from .function_parameters import FunctionParameters as FunctionParameters openai-python-1.12.0/src/openai/types/shared_params/function_definition.py000066400000000000000000000020251456126711000270150ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing_extensions import Required, TypedDict from ...types import shared_params __all__ = ["FunctionDefinition"] class FunctionDefinition(TypedDict, total=False): name: Required[str] """The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. """ description: str """ A description of what the function does, used by the model to choose when and how to call the function. """ parameters: shared_params.FunctionParameters """The parameters the functions accepts, described as a JSON Schema object. 
See the [guide](https://platform.openai.com/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. Omitting `parameters` defines a function with an empty parameter list. """ openai-python-1.12.0/src/openai/types/shared_params/function_parameters.py000066400000000000000000000002741456126711000270340ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations from typing import Dict __all__ = ["FunctionParameters"] FunctionParameters = Dict[str, object] openai-python-1.12.0/src/openai/version.py000066400000000000000000000000761456126711000204740ustar00rootroot00000000000000from ._version import __version__ VERSION: str = __version__ openai-python-1.12.0/tests/000077500000000000000000000000001456126711000155325ustar00rootroot00000000000000openai-python-1.12.0/tests/__init__.py000066400000000000000000000000651456126711000176440ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/000077500000000000000000000000001456126711000203755ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/__init__.py000066400000000000000000000000651456126711000225070ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/audio/000077500000000000000000000000001456126711000214765ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/audio/__init__.py000066400000000000000000000000651456126711000236100ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/audio/test_speech.py000066400000000000000000000131661456126711000243650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
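# Added usage sketch (illustrative; not generated by Stainless). The tests below exercise
# `client.audio.speech.create(...)` against a mock server; the model name, voice, and
# output path here are placeholder assumptions for real-world use.
"""Minimal sketch of generating speech and saving the binary response::

    from openai import OpenAI

    client = OpenAI()
    speech = client.audio.speech.create(
        model="tts-1",
        voice="alloy",
        input="Hello from the OpenAI Python library.",
    )
    # `speech` is an HttpxBinaryResponseContent; write its bytes to disk.
    with open("hello.mp3", "wb") as f:
        f.write(speech.content)
"""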
from __future__ import annotations import os from typing import Any, cast import httpx import pytest from respx import MockRouter import openai._legacy_response as _legacy_response from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type # pyright: reportDeprecated=false base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestSpeech: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize @pytest.mark.respx(base_url=base_url) def test_method_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( input="string", model="string", voice="alloy", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @parametrize @pytest.mark.respx(base_url=base_url) def test_method_create_with_all_params(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( input="string", model="string", voice="alloy", response_format="mp3", speed=0.25, ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @parametrize @pytest.mark.respx(base_url=base_url) def test_raw_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = client.audio.speech.with_raw_response.create( input="string", model="string", voice="alloy", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" speech = response.parse() assert_matches_type(_legacy_response.HttpxBinaryResponseContent, speech, path=["response"]) @parametrize @pytest.mark.respx(base_url=base_url) def test_streaming_response_create(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) with client.audio.speech.with_streaming_response.create( input="string", model="string", voice="alloy", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" speech = response.parse() assert_matches_type(bytes, speech, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncSpeech: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize @pytest.mark.respx(base_url=base_url) async def test_method_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( input="string", model="string", voice="alloy", ) assert isinstance(speech, _legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @parametrize @pytest.mark.respx(base_url=base_url) async def test_method_create_with_all_params(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( input="string", model="string", voice="alloy", response_format="mp3", speed=0.25, ) assert isinstance(speech, 
_legacy_response.HttpxBinaryResponseContent) assert speech.json() == {"foo": "bar"} @parametrize @pytest.mark.respx(base_url=base_url) async def test_raw_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await async_client.audio.speech.with_raw_response.create( input="string", model="string", voice="alloy", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" speech = response.parse() assert_matches_type(_legacy_response.HttpxBinaryResponseContent, speech, path=["response"]) @parametrize @pytest.mark.respx(base_url=base_url) async def test_streaming_response_create(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.post("/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) async with async_client.audio.speech.with_streaming_response.create( input="string", model="string", voice="alloy", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" speech = await response.parse() assert_matches_type(bytes, speech, path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/audio/test_transcriptions.py000066400000000000000000000104471456126711000261770ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types.audio import Transcription base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestTranscriptions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: transcription = client.audio.transcriptions.create( file=b"raw file contents", model="whisper-1", ) assert_matches_type(Transcription, transcription, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: transcription = client.audio.transcriptions.create( file=b"raw file contents", model="whisper-1", language="string", prompt="string", response_format="json", temperature=0, timestamp_granularities=["word", "segment"], ) assert_matches_type(Transcription, transcription, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.audio.transcriptions.with_raw_response.create( file=b"raw file contents", model="whisper-1", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = response.parse() assert_matches_type(Transcription, transcription, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.audio.transcriptions.with_streaming_response.create( file=b"raw file contents", model="whisper-1", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = response.parse() assert_matches_type(Transcription, transcription, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncTranscriptions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, 
async_client: AsyncOpenAI) -> None: transcription = await async_client.audio.transcriptions.create( file=b"raw file contents", model="whisper-1", ) assert_matches_type(Transcription, transcription, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: transcription = await async_client.audio.transcriptions.create( file=b"raw file contents", model="whisper-1", language="string", prompt="string", response_format="json", temperature=0, timestamp_granularities=["word", "segment"], ) assert_matches_type(Transcription, transcription, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.audio.transcriptions.with_raw_response.create( file=b"raw file contents", model="whisper-1", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = response.parse() assert_matches_type(Transcription, transcription, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.audio.transcriptions.with_streaming_response.create( file=b"raw file contents", model="whisper-1", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" transcription = await response.parse() assert_matches_type(Transcription, transcription, path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/audio/test_translations.py000066400000000000000000000100651456126711000256320ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types.audio import Translation base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestTranslations: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: translation = client.audio.translations.create( file=b"raw file contents", model="whisper-1", ) assert_matches_type(Translation, translation, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: translation = client.audio.translations.create( file=b"raw file contents", model="whisper-1", prompt="string", response_format="string", temperature=0, ) assert_matches_type(Translation, translation, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.audio.translations.with_raw_response.create( file=b"raw file contents", model="whisper-1", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = response.parse() assert_matches_type(Translation, translation, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.audio.translations.with_streaming_response.create( file=b"raw file contents", model="whisper-1", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = response.parse() assert_matches_type(Translation, translation, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncTranslations: parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: translation = await async_client.audio.translations.create( file=b"raw file contents", model="whisper-1", ) assert_matches_type(Translation, translation, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: translation = await async_client.audio.translations.create( file=b"raw file contents", model="whisper-1", prompt="string", response_format="string", temperature=0, ) assert_matches_type(Translation, translation, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.audio.translations.with_raw_response.create( file=b"raw file contents", model="whisper-1", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = response.parse() assert_matches_type(Translation, translation, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.audio.translations.with_streaming_response.create( file=b"raw file contents", model="whisper-1", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" translation = await response.parse() assert_matches_type(Translation, translation, path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/beta/000077500000000000000000000000001456126711000213105ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/beta/__init__.py000066400000000000000000000000651456126711000234220ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/beta/assistants/000077500000000000000000000000001456126711000235045ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/beta/assistants/__init__.py000066400000000000000000000000651456126711000256160ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/beta/assistants/test_files.py000066400000000000000000000360341456126711000262250ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.beta.assistants import AssistantFile, FileDeleteResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestFiles: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: file = client.beta.assistants.files.create( "file-abc123", file_id="string", ) assert_matches_type(AssistantFile, file, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.assistants.files.with_raw_response.create( "file-abc123", file_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AssistantFile, file, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.assistants.files.with_streaming_response.create( "file-abc123", file_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AssistantFile, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.files.with_raw_response.create( "", file_id="string", ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: file = client.beta.assistants.files.retrieve( "string", assistant_id="string", ) assert_matches_type(AssistantFile, file, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.assistants.files.with_raw_response.retrieve( "string", assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AssistantFile, file, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.assistants.files.with_streaming_response.retrieve( "string", assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AssistantFile, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.files.with_raw_response.retrieve( "string", assistant_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.beta.assistants.files.with_raw_response.retrieve( "", assistant_id="string", ) @parametrize def test_method_list(self, client: OpenAI) -> None: file = client.beta.assistants.files.list( "string", ) assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: file = client.beta.assistants.files.list( "string", after="string", 
before="string", limit=0, order="asc", ) assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.assistants.files.with_raw_response.list( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.assistants.files.with_streaming_response.list( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(SyncCursorPage[AssistantFile], file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.files.with_raw_response.list( "", ) @parametrize def test_method_delete(self, client: OpenAI) -> None: file = client.beta.assistants.files.delete( "string", assistant_id="string", ) assert_matches_type(FileDeleteResponse, file, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.assistants.files.with_raw_response.delete( "string", assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileDeleteResponse, file, path=["response"]) @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.assistants.files.with_streaming_response.delete( "string", assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileDeleteResponse, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.files.with_raw_response.delete( "string", assistant_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.beta.assistants.files.with_raw_response.delete( "", assistant_id="string", ) class TestAsyncFiles: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.assistants.files.create( "file-abc123", file_id="string", ) assert_matches_type(AssistantFile, file, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.files.with_raw_response.create( "file-abc123", file_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AssistantFile, file, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.files.with_streaming_response.create( "file-abc123", file_id="string", ) as response: 
assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(AssistantFile, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.files.with_raw_response.create( "", file_id="string", ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.assistants.files.retrieve( "string", assistant_id="string", ) assert_matches_type(AssistantFile, file, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.files.with_raw_response.retrieve( "string", assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AssistantFile, file, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.files.with_streaming_response.retrieve( "string", assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(AssistantFile, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.files.with_raw_response.retrieve( "string", assistant_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.beta.assistants.files.with_raw_response.retrieve( "", assistant_id="string", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.assistants.files.list( "string", ) assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.assistants.files.list( "string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.files.with_raw_response.list( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.files.with_streaming_response.list( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(AsyncCursorPage[AssistantFile], file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: 
with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.files.with_raw_response.list( "", ) @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.assistants.files.delete( "string", assistant_id="string", ) assert_matches_type(FileDeleteResponse, file, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.files.with_raw_response.delete( "string", assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileDeleteResponse, file, path=["response"]) @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.files.with_streaming_response.delete( "string", assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(FileDeleteResponse, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.files.with_raw_response.delete( "string", assistant_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.beta.assistants.files.with_raw_response.delete( "", assistant_id="string", ) openai-python-1.12.0/tests/api_resources/beta/chat/000077500000000000000000000000001456126711000222275ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/beta/chat/__init__.py000066400000000000000000000000651456126711000243410ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/beta/test_assistants.py000066400000000000000000000416221456126711000251220ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.beta import ( Assistant, AssistantDeleted, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestAssistants: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( model="string", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.create( model="string", description="string", file_ids=["string", "string", "string"], instructions="string", metadata={}, name="string", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.create( model="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.create( model="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_retrieve(self, client: OpenAI) -> None: assistant = client.beta.assistants.retrieve( "string", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.retrieve( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.retrieve( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.with_raw_response.retrieve( "", ) @parametrize def test_method_update(self, client: OpenAI) -> None: assistant = client.beta.assistants.update( "string", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.update( "string", description="string", file_ids=["string", "string", "string"], instructions="string", metadata={}, model="string", name="string", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": 
"code_interpreter"}], ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.update( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.update( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.with_raw_response.update( "", ) @parametrize def test_method_list(self, client: OpenAI) -> None: assistant = client.beta.assistants.list() assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: assistant = client.beta.assistants.list( after="string", before="string", limit=0, order="asc", ) assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(SyncCursorPage[Assistant], assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_delete(self, client: OpenAI) -> None: assistant = client.beta.assistants.delete( "string", ) assert_matches_type(AssistantDeleted, assistant, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.assistants.with_raw_response.delete( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(AssistantDeleted, assistant, path=["response"]) @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.assistants.with_streaming_response.delete( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(AssistantDeleted, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): client.beta.assistants.with_raw_response.delete( "", ) class TestAsyncAssistants: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", 
"strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( model="string", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.create( model="string", description="string", file_ids=["string", "string", "string"], instructions="string", metadata={}, name="string", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.create( model="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.create( model="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = await response.parse() assert_matches_type(Assistant, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.retrieve( "string", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.retrieve( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.retrieve( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = await response.parse() assert_matches_type(Assistant, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.with_raw_response.retrieve( "", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.update( "string", ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.update( "string", description="string", file_ids=["string", "string", "string"], instructions="string", metadata={}, model="string", name="string", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: 
response = await async_client.beta.assistants.with_raw_response.update( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(Assistant, assistant, path=["response"]) @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.update( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = await response.parse() assert_matches_type(Assistant, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await async_client.beta.assistants.with_raw_response.update( "", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.list() assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.list( after="string", before="string", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = await response.parse() assert_matches_type(AsyncCursorPage[Assistant], assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: assistant = await async_client.beta.assistants.delete( "string", ) assert_matches_type(AssistantDeleted, assistant, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.assistants.with_raw_response.delete( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = response.parse() assert_matches_type(AssistantDeleted, assistant, path=["response"]) @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.assistants.with_streaming_response.delete( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" assistant = await response.parse() assert_matches_type(AssistantDeleted, assistant, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"): await 
async_client.beta.assistants.with_raw_response.delete( "", ) openai-python-1.12.0/tests/api_resources/beta/test_threads.py000066400000000000000000000443661456126711000243700ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types.beta import ( Thread, ThreadDeleted, ) from openai.types.beta.threads import Run base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestThreads: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: thread = client.beta.threads.create() assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.create( messages=[ { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, ], metadata={}, ) assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_retrieve(self, client: OpenAI) -> None: thread = client.beta.threads.retrieve( "string", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.retrieve( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.retrieve( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.with_raw_response.retrieve( "", ) @parametrize def test_method_update(self, client: OpenAI) -> None: thread = client.beta.threads.update( "string", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.update( "string", metadata={}, ) assert_matches_type(Thread, thread, path=["response"]) @parametrize def 
test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.update( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.update( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.with_raw_response.update( "", ) @parametrize def test_method_delete(self, client: OpenAI) -> None: thread = client.beta.threads.delete( "string", ) assert_matches_type(ThreadDeleted, thread, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.delete( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(ThreadDeleted, thread, path=["response"]) @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.delete( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(ThreadDeleted, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.with_raw_response.delete( "", ) @parametrize def test_method_create_and_run(self, client: OpenAI) -> None: thread = client.beta.threads.create_and_run( assistant_id="string", ) assert_matches_type(Run, thread, path=["response"]) @parametrize def test_method_create_and_run_with_all_params(self, client: OpenAI) -> None: thread = client.beta.threads.create_and_run( assistant_id="string", instructions="string", metadata={}, model="string", thread={ "messages": [ { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, ], "metadata": {}, }, tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Run, thread, path=["response"]) @parametrize def test_raw_response_create_and_run(self, client: OpenAI) -> None: response = client.beta.threads.with_raw_response.create_and_run( assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Run, thread, path=["response"]) @parametrize def test_streaming_response_create_and_run(self, client: OpenAI) -> None: with client.beta.threads.with_streaming_response.create_and_run( assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" 
thread = response.parse() assert_matches_type(Run, thread, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncThreads: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.create() assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.create( messages=[ { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, ], metadata={}, ) assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.create() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.create() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = await response.parse() assert_matches_type(Thread, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.retrieve( "string", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.retrieve( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.retrieve( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = await response.parse() assert_matches_type(Thread, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.with_raw_response.retrieve( "", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.update( "string", ) assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.update( "string", metadata={}, ) assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.update( "string", ) assert response.is_closed is 
True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Thread, thread, path=["response"]) @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.update( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = await response.parse() assert_matches_type(Thread, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.with_raw_response.update( "", ) @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.delete( "string", ) assert_matches_type(ThreadDeleted, thread, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.delete( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(ThreadDeleted, thread, path=["response"]) @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.with_streaming_response.delete( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = await response.parse() assert_matches_type(ThreadDeleted, thread, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.with_raw_response.delete( "", ) @parametrize async def test_method_create_and_run(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.create_and_run( assistant_id="string", ) assert_matches_type(Run, thread, path=["response"]) @parametrize async def test_method_create_and_run_with_all_params(self, async_client: AsyncOpenAI) -> None: thread = await async_client.beta.threads.create_and_run( assistant_id="string", instructions="string", metadata={}, model="string", thread={ "messages": [ { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, { "role": "user", "content": "x", "file_ids": ["string"], "metadata": {}, }, ], "metadata": {}, }, tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Run, thread, path=["response"]) @parametrize async def test_raw_response_create_and_run(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.with_raw_response.create_and_run( assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = response.parse() assert_matches_type(Run, thread, path=["response"]) @parametrize async def test_streaming_response_create_and_run(self, async_client: AsyncOpenAI) -> None: async with 
async_client.beta.threads.with_streaming_response.create_and_run( assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" thread = await response.parse() assert_matches_type(Run, thread, path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/beta/threads/000077500000000000000000000000001456126711000227425ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/beta/threads/__init__.py000066400000000000000000000000651456126711000250540ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/beta/threads/messages/000077500000000000000000000000001456126711000245515ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/beta/threads/messages/__init__.py000066400000000000000000000000651456126711000266630ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/beta/threads/messages/test_files.py000066400000000000000000000240721456126711000272710ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.beta.threads.messages import MessageFile base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestFiles: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: file = client.beta.threads.messages.files.retrieve( "file-abc123", thread_id="thread_abc123", message_id="msg_abc123", ) assert_matches_type(MessageFile, file, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.messages.files.with_raw_response.retrieve( "file-abc123", thread_id="thread_abc123", message_id="msg_abc123", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(MessageFile, file, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.messages.files.with_streaming_response.retrieve( "file-abc123", thread_id="thread_abc123", message_id="msg_abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(MessageFile, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.files.with_raw_response.retrieve( "file-abc123", thread_id="", message_id="msg_abc123", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.files.with_raw_response.retrieve( "file-abc123", thread_id="thread_abc123", message_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.beta.threads.messages.files.with_raw_response.retrieve( "", thread_id="thread_abc123", 
message_id="msg_abc123", ) @parametrize def test_method_list(self, client: OpenAI) -> None: file = client.beta.threads.messages.files.list( "string", thread_id="string", ) assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: file = client.beta.threads.messages.files.list( "string", thread_id="string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.threads.messages.files.with_raw_response.list( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.threads.messages.files.with_streaming_response.list( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(SyncCursorPage[MessageFile], file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.files.with_raw_response.list( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.files.with_raw_response.list( "", thread_id="string", ) class TestAsyncFiles: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.threads.messages.files.retrieve( "file-abc123", thread_id="thread_abc123", message_id="msg_abc123", ) assert_matches_type(MessageFile, file, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.files.with_raw_response.retrieve( "file-abc123", thread_id="thread_abc123", message_id="msg_abc123", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(MessageFile, file, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.files.with_streaming_response.retrieve( "file-abc123", thread_id="thread_abc123", message_id="msg_abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(MessageFile, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.files.with_raw_response.retrieve( "file-abc123", thread_id="", message_id="msg_abc123", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but 
received ''"): await async_client.beta.threads.messages.files.with_raw_response.retrieve( "file-abc123", thread_id="thread_abc123", message_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.beta.threads.messages.files.with_raw_response.retrieve( "", thread_id="thread_abc123", message_id="msg_abc123", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.threads.messages.files.list( "string", thread_id="string", ) assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.beta.threads.messages.files.list( "string", thread_id="string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.files.with_raw_response.list( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.files.with_streaming_response.list( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(AsyncCursorPage[MessageFile], file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.files.with_raw_response.list( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): await async_client.beta.threads.messages.files.with_raw_response.list( "", thread_id="string", ) openai-python-1.12.0/tests/api_resources/beta/threads/runs/000077500000000000000000000000001456126711000237315ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/beta/threads/runs/__init__.py000066400000000000000000000000651456126711000260430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/beta/threads/runs/test_steps.py000066400000000000000000000232721456126711000265060ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.beta.threads.runs import RunStep base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestSteps: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: step = client.beta.threads.runs.steps.retrieve( "string", thread_id="string", run_id="string", ) assert_matches_type(RunStep, step, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.runs.steps.with_raw_response.retrieve( "string", thread_id="string", run_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = response.parse() assert_matches_type(RunStep, step, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.runs.steps.with_streaming_response.retrieve( "string", thread_id="string", run_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = response.parse() assert_matches_type(RunStep, step, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.steps.with_raw_response.retrieve( "string", thread_id="", run_id="string", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.steps.with_raw_response.retrieve( "string", thread_id="string", run_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): client.beta.threads.runs.steps.with_raw_response.retrieve( "", thread_id="string", run_id="string", ) @parametrize def test_method_list(self, client: OpenAI) -> None: step = client.beta.threads.runs.steps.list( "string", thread_id="string", ) assert_matches_type(SyncCursorPage[RunStep], step, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: step = client.beta.threads.runs.steps.list( "string", thread_id="string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(SyncCursorPage[RunStep], step, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.threads.runs.steps.with_raw_response.list( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = response.parse() assert_matches_type(SyncCursorPage[RunStep], step, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.threads.runs.steps.with_streaming_response.list( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = response.parse() assert_matches_type(SyncCursorPage[RunStep], step, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> None: 
with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.steps.with_raw_response.list( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.steps.with_raw_response.list( "", thread_id="string", ) class TestAsyncSteps: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: step = await async_client.beta.threads.runs.steps.retrieve( "string", thread_id="string", run_id="string", ) assert_matches_type(RunStep, step, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.steps.with_raw_response.retrieve( "string", thread_id="string", run_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = response.parse() assert_matches_type(RunStep, step, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.steps.with_streaming_response.retrieve( "string", thread_id="string", run_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = await response.parse() assert_matches_type(RunStep, step, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.retrieve( "string", thread_id="", run_id="string", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.retrieve( "string", thread_id="string", run_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `step_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.retrieve( "", thread_id="string", run_id="string", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: step = await async_client.beta.threads.runs.steps.list( "string", thread_id="string", ) assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: step = await async_client.beta.threads.runs.steps.list( "string", thread_id="string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.steps.with_raw_response.list( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = response.parse() assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.steps.with_streaming_response.list( "string", thread_id="string", ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" step = await response.parse() assert_matches_type(AsyncCursorPage[RunStep], step, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.list( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.steps.with_raw_response.list( "", thread_id="string", ) openai-python-1.12.0/tests/api_resources/beta/threads/test_messages.py000066400000000000000000000410671456126711000261720ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.beta.threads import ThreadMessage base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestMessages: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: message = client.beta.threads.messages.create( "string", content="x", role="user", ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.create( "string", content="x", role="user", file_ids=["string"], metadata={}, ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.create( "string", content="x", role="user", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.create( "string", content="x", role="user", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.create( "", content="x", role="user", ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: message = client.beta.threads.messages.retrieve( "string", thread_id="string", ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.retrieve( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def 
test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.retrieve( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.retrieve( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.with_raw_response.retrieve( "", thread_id="string", ) @parametrize def test_method_update(self, client: OpenAI) -> None: message = client.beta.threads.messages.update( "string", thread_id="string", ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.update( "string", thread_id="string", metadata={}, ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.update( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.update( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.update( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): client.beta.threads.messages.with_raw_response.update( "", thread_id="string", ) @parametrize def test_method_list(self, client: OpenAI) -> None: message = client.beta.threads.messages.list( "string", ) assert_matches_type(SyncCursorPage[ThreadMessage], message, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: message = client.beta.threads.messages.list( "string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(SyncCursorPage[ThreadMessage], message, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.threads.messages.with_raw_response.list( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(SyncCursorPage[ThreadMessage], message, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.threads.messages.with_streaming_response.list( "string", ) as response: assert not 
response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(SyncCursorPage[ThreadMessage], message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.messages.with_raw_response.list( "", ) class TestAsyncMessages: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.create( "string", content="x", role="user", ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.create( "string", content="x", role="user", file_ids=["string"], metadata={}, ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.create( "string", content="x", role="user", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.create( "string", content="x", role="user", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = await response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.create( "", content="x", role="user", ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.retrieve( "string", thread_id="string", ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.retrieve( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.retrieve( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = await response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected 
a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.retrieve( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.retrieve( "", thread_id="string", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.update( "string", thread_id="string", ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.update( "string", thread_id="string", metadata={}, ) assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.update( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.update( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = await response.parse() assert_matches_type(ThreadMessage, message, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.update( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `message_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.update( "", thread_id="string", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.list( "string", ) assert_matches_type(AsyncCursorPage[ThreadMessage], message, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: message = await async_client.beta.threads.messages.list( "string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[ThreadMessage], message, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.messages.with_raw_response.list( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = response.parse() assert_matches_type(AsyncCursorPage[ThreadMessage], message, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.messages.with_streaming_response.list( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" message = await response.parse() assert_matches_type(AsyncCursorPage[ThreadMessage], message, path=["response"]) assert cast(Any, 
response.is_closed) is True @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.messages.with_raw_response.list( "", ) openai-python-1.12.0/tests/api_resources/beta/threads/test_runs.py000066400000000000000000000574141456126711000253550ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.beta.threads import ( Run, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestRuns: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: run = client.beta.threads.runs.create( "string", assistant_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.create( "string", assistant_id="string", additional_instructions="string", instructions="string", metadata={}, model="string", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.create( "string", assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.create( "string", assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_create(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.create( "", assistant_id="string", ) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: run = client.beta.threads.runs.retrieve( "string", thread_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.retrieve( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.retrieve( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, 
client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.retrieve( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.retrieve( "", thread_id="string", ) @parametrize def test_method_update(self, client: OpenAI) -> None: run = client.beta.threads.runs.update( "string", thread_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_method_update_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.update( "string", thread_id="string", metadata={}, ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_raw_response_update(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.update( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize def test_streaming_response_update(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.update( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_update(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.update( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.update( "", thread_id="string", ) @parametrize def test_method_list(self, client: OpenAI) -> None: run = client.beta.threads.runs.list( "string", ) assert_matches_type(SyncCursorPage[Run], run, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: run = client.beta.threads.runs.list( "string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(SyncCursorPage[Run], run, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.list( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(SyncCursorPage[Run], run, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.list( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(SyncCursorPage[Run], run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.list( "", ) @parametrize def test_method_cancel(self, client: OpenAI) -> None: run = client.beta.threads.runs.cancel( "string", thread_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize 
def test_raw_response_cancel(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.cancel( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize def test_streaming_response_cancel(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.cancel( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_cancel(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.cancel( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.cancel( "", thread_id="string", ) @parametrize def test_method_submit_tool_outputs(self, client: OpenAI) -> None: run = client.beta.threads.runs.submit_tool_outputs( "string", thread_id="string", tool_outputs=[{}, {}, {}], ) assert_matches_type(Run, run, path=["response"]) @parametrize def test_raw_response_submit_tool_outputs(self, client: OpenAI) -> None: response = client.beta.threads.runs.with_raw_response.submit_tool_outputs( "string", thread_id="string", tool_outputs=[{}, {}, {}], ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize def test_streaming_response_submit_tool_outputs(self, client: OpenAI) -> None: with client.beta.threads.runs.with_streaming_response.submit_tool_outputs( "string", thread_id="string", tool_outputs=[{}, {}, {}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_submit_tool_outputs(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( "string", thread_id="", tool_outputs=[{}, {}, {}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): client.beta.threads.runs.with_raw_response.submit_tool_outputs( "", thread_id="string", tool_outputs=[{}, {}, {}], ) class TestAsyncRuns: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.create( "string", assistant_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.create( "string", assistant_id="string", additional_instructions="string", instructions="string", metadata={}, model="string", tools=[{"type": "code_interpreter"}, {"type": "code_interpreter"}, {"type": "code_interpreter"}], ) assert_matches_type(Run, run, path=["response"]) 
@parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.create( "string", assistant_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.create( "string", assistant_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_create(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.create( "", assistant_id="string", ) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.retrieve( "string", thread_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.retrieve( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.retrieve( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.retrieve( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.retrieve( "", thread_id="string", ) @parametrize async def test_method_update(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.update( "string", thread_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_method_update_with_all_params(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.update( "string", thread_id="string", metadata={}, ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_update(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.update( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_update(self, async_client: AsyncOpenAI) -> None: 
async with async_client.beta.threads.runs.with_streaming_response.update( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_update(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.update( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.update( "", thread_id="string", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.list( "string", ) assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.list( "string", after="string", before="string", limit=0, order="asc", ) assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.list( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.list( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(AsyncCursorPage[Run], run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_list(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.list( "", ) @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.cancel( "string", thread_id="string", ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.cancel( "string", thread_id="string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.cancel( "string", thread_id="string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for 
`thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.cancel( "string", thread_id="", ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.cancel( "", thread_id="string", ) @parametrize async def test_method_submit_tool_outputs(self, async_client: AsyncOpenAI) -> None: run = await async_client.beta.threads.runs.submit_tool_outputs( "string", thread_id="string", tool_outputs=[{}, {}, {}], ) assert_matches_type(Run, run, path=["response"]) @parametrize async def test_raw_response_submit_tool_outputs(self, async_client: AsyncOpenAI) -> None: response = await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( "string", thread_id="string", tool_outputs=[{}, {}, {}], ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = response.parse() assert_matches_type(Run, run, path=["response"]) @parametrize async def test_streaming_response_submit_tool_outputs(self, async_client: AsyncOpenAI) -> None: async with async_client.beta.threads.runs.with_streaming_response.submit_tool_outputs( "string", thread_id="string", tool_outputs=[{}, {}, {}], ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" run = await response.parse() assert_matches_type(Run, run, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_submit_tool_outputs(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `thread_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( "string", thread_id="", tool_outputs=[{}, {}, {}], ) with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"): await async_client.beta.threads.runs.with_raw_response.submit_tool_outputs( "", thread_id="string", tool_outputs=[{}, {}, {}], ) openai-python-1.12.0/tests/api_resources/chat/000077500000000000000000000000001456126711000213145ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/chat/__init__.py000066400000000000000000000000651456126711000234260ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/chat/test_completions.py000066400000000000000000000362671456126711000252770ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types.chat import ChatCompletion base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestCompletions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create_overload_1(self, client: OpenAI) -> None: completion = client.chat.completions.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", ) assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: completion = client.chat.completions.create( messages=[ { "content": "string", "role": "system", "name": "string", } ], model="gpt-3.5-turbo", frequency_penalty=-2, function_call="none", functions=[ { "description": "string", "name": "string", "parameters": {"foo": "bar"}, } ], logit_bias={"foo": 0}, logprobs=True, max_tokens=0, n=1, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, stop="string", stream=False, temperature=1, tool_choice="none", tools=[ { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, ], top_logprobs=0, top_p=1, user="user-1234", ) assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize def test_raw_response_create_overload_1(self, client: OpenAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() assert_matches_type(ChatCompletion, completion, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_create_overload_2(self, client: OpenAI) -> None: completion_stream = client.chat.completions.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", stream=True, ) completion_stream.response.close() @parametrize def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: completion_stream = client.chat.completions.create( messages=[ { "content": "string", "role": "system", "name": "string", } ], model="gpt-3.5-turbo", stream=True, frequency_penalty=-2, function_call="none", functions=[ { "description": "string", "name": "string", "parameters": {"foo": "bar"}, } ], logit_bias={"foo": 0}, logprobs=True, max_tokens=0, n=1, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, stop="string", temperature=1, tool_choice="none", tools=[ { "type": "function", "function": { 
"description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, ], top_logprobs=0, top_p=1, user="user-1234", ) completion_stream.response.close() @parametrize def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.chat.completions.with_raw_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", stream=True, ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() stream.close() @parametrize def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", stream=True, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() stream.close() assert cast(Any, response.is_closed) is True class TestAsyncCompletions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.chat.completions.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", ) assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.chat.completions.create( messages=[ { "content": "string", "role": "system", "name": "string", } ], model="gpt-3.5-turbo", frequency_penalty=-2, function_call="none", functions=[ { "description": "string", "name": "string", "parameters": {"foo": "bar"}, } ], logit_bias={"foo": 0}, logprobs=True, max_tokens=0, n=1, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, stop="string", stream=False, temperature=1, tool_choice="none", tools=[ { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, ], top_logprobs=0, top_p=1, user="user-1234", ) assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.chat.completions.with_raw_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() assert_matches_type(ChatCompletion, completion, path=["response"]) @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" 
completion = await response.parse() assert_matches_type(ChatCompletion, completion, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.chat.completions.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", stream=True, ) await completion_stream.response.aclose() @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.chat.completions.create( messages=[ { "content": "string", "role": "system", "name": "string", } ], model="gpt-3.5-turbo", stream=True, frequency_penalty=-2, function_call="none", functions=[ { "description": "string", "name": "string", "parameters": {"foo": "bar"}, } ], logit_bias={"foo": 0}, logprobs=True, max_tokens=0, n=1, presence_penalty=-2, response_format={"type": "json_object"}, seed=-9223372036854776000, stop="string", temperature=1, tool_choice="none", tools=[ { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, { "type": "function", "function": { "description": "string", "name": "string", "parameters": {"foo": "bar"}, }, }, ], top_logprobs=0, top_p=1, user="user-1234", ) await completion_stream.response.aclose() @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.chat.completions.with_raw_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", stream=True, ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() await stream.close() @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.chat.completions.with_streaming_response.create( messages=[ { "content": "string", "role": "system", } ], model="gpt-3.5-turbo", stream=True, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = await response.parse() await stream.close() assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/fine_tuning/000077500000000000000000000000001456126711000227025ustar00rootroot00000000000000openai-python-1.12.0/tests/api_resources/fine_tuning/__init__.py000066400000000000000000000000651456126711000250140ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. openai-python-1.12.0/tests/api_resources/fine_tuning/test_jobs.py000066400000000000000000000417651456126711000252650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.pagination import SyncCursorPage, AsyncCursorPage from openai.types.fine_tuning import ( FineTuningJob, FineTuningJobEvent, ) base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestJobs: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.create( model="gpt-3.5-turbo", training_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.create( model="gpt-3.5-turbo", training_file="file-abc123", hyperparameters={ "batch_size": "auto", "learning_rate_multiplier": "auto", "n_epochs": "auto", }, suffix="x", validation_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.create( model="gpt-3.5-turbo", training_file="file-abc123", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.create( model="gpt-3.5-turbo", training_file="file-abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_retrieve(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.retrieve( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.retrieve( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.retrieve( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): client.fine_tuning.jobs.with_raw_response.retrieve( "", ) @parametrize def test_method_list(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list() assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list( after="string", limit=0, ) assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"]) @parametrize def 
test_raw_response_list(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(SyncCursorPage[FineTuningJob], job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_cancel(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.cancel( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_raw_response_cancel(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.cancel( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize def test_streaming_response_cancel(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.cancel( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_cancel(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): client.fine_tuning.jobs.with_raw_response.cancel( "", ) @parametrize def test_method_list_events(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize def test_method_list_events_with_all_params(self, client: OpenAI) -> None: job = client.fine_tuning.jobs.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", after="string", limit=0, ) assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize def test_raw_response_list_events(self, client: OpenAI) -> None: response = client.fine_tuning.jobs.with_raw_response.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize def test_streaming_response_list_events(self, client: OpenAI) -> None: with client.fine_tuning.jobs.with_streaming_response.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(SyncCursorPage[FineTuningJobEvent], job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_list_events(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): client.fine_tuning.jobs.with_raw_response.list_events( "", ) class TestAsyncJobs: parametrize = 
pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.create( model="gpt-3.5-turbo", training_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.create( model="gpt-3.5-turbo", training_file="file-abc123", hyperparameters={ "batch_size": "auto", "learning_rate_multiplier": "auto", "n_epochs": "auto", }, suffix="x", validation_file="file-abc123", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.create( model="gpt-3.5-turbo", training_file="file-abc123", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.create( model="gpt-3.5-turbo", training_file="file-abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.retrieve( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.retrieve( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.retrieve( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): await async_client.fine_tuning.jobs.with_raw_response.retrieve( "", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list() assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list( after="string", limit=0, ) assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await 
async_client.fine_tuning.jobs.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_cancel(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.cancel( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.cancel( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) @parametrize async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.cancel( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() assert_matches_type(FineTuningJob, job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): await async_client.fine_tuning.jobs.with_raw_response.cancel( "", ) @parametrize async def test_method_list_events(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize async def test_method_list_events_with_all_params(self, async_client: AsyncOpenAI) -> None: job = await async_client.fine_tuning.jobs.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", after="string", limit=0, ) assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize async def test_raw_response_list_events(self, async_client: AsyncOpenAI) -> None: response = await async_client.fine_tuning.jobs.with_raw_response.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = response.parse() assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"]) @parametrize async def test_streaming_response_list_events(self, async_client: AsyncOpenAI) -> None: async with async_client.fine_tuning.jobs.with_streaming_response.list_events( "ft-AF1WoRqd3aJAHsqc9NY7iL8F", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" job = await response.parse() assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_list_events(self, async_client: 
AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"): await async_client.fine_tuning.jobs.with_raw_response.list_events( "", ) openai-python-1.12.0/tests/api_resources/test_completions.py000066400000000000000000000207521456126711000243500ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import Completion base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestCompletions: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create_overload_1(self, client: OpenAI) -> None: completion = client.completions.create( model="string", prompt="This is a test.", ) assert_matches_type(Completion, completion, path=["response"]) @parametrize def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: completion = client.completions.create( model="string", prompt="This is a test.", best_of=0, echo=True, frequency_penalty=-2, logit_bias={"foo": 0}, logprobs=0, max_tokens=16, n=1, presence_penalty=-2, seed=-9223372036854776000, stop="\n", stream=False, suffix="test.", temperature=1, top_p=1, user="user-1234", ) assert_matches_type(Completion, completion, path=["response"]) @parametrize def test_raw_response_create_overload_1(self, client: OpenAI) -> None: response = client.completions.with_raw_response.create( model="string", prompt="This is a test.", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() assert_matches_type(Completion, completion, path=["response"]) @parametrize def test_streaming_response_create_overload_1(self, client: OpenAI) -> None: with client.completions.with_streaming_response.create( model="string", prompt="This is a test.", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() assert_matches_type(Completion, completion, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_create_overload_2(self, client: OpenAI) -> None: completion_stream = client.completions.create( model="string", prompt="This is a test.", stream=True, ) completion_stream.response.close() @parametrize def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: completion_stream = client.completions.create( model="string", prompt="This is a test.", stream=True, best_of=0, echo=True, frequency_penalty=-2, logit_bias={"foo": 0}, logprobs=0, max_tokens=16, n=1, presence_penalty=-2, seed=-9223372036854776000, stop="\n", suffix="test.", temperature=1, top_p=1, user="user-1234", ) completion_stream.response.close() @parametrize def test_raw_response_create_overload_2(self, client: OpenAI) -> None: response = client.completions.with_raw_response.create( model="string", prompt="This is a test.", stream=True, ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() stream.close() @parametrize def test_streaming_response_create_overload_2(self, client: OpenAI) -> None: with client.completions.with_streaming_response.create( model="string", prompt="This is a test.", stream=True, ) as response: assert not response.is_closed 
assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() stream.close() assert cast(Any, response.is_closed) is True class TestAsyncCompletions: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.completions.create( model="string", prompt="This is a test.", ) assert_matches_type(Completion, completion, path=["response"]) @parametrize async def test_method_create_with_all_params_overload_1(self, async_client: AsyncOpenAI) -> None: completion = await async_client.completions.create( model="string", prompt="This is a test.", best_of=0, echo=True, frequency_penalty=-2, logit_bias={"foo": 0}, logprobs=0, max_tokens=16, n=1, presence_penalty=-2, seed=-9223372036854776000, stop="\n", stream=False, suffix="test.", temperature=1, top_p=1, user="user-1234", ) assert_matches_type(Completion, completion, path=["response"]) @parametrize async def test_raw_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: response = await async_client.completions.with_raw_response.create( model="string", prompt="This is a test.", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = response.parse() assert_matches_type(Completion, completion, path=["response"]) @parametrize async def test_streaming_response_create_overload_1(self, async_client: AsyncOpenAI) -> None: async with async_client.completions.with_streaming_response.create( model="string", prompt="This is a test.", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" completion = await response.parse() assert_matches_type(Completion, completion, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_create_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.completions.create( model="string", prompt="This is a test.", stream=True, ) await completion_stream.response.aclose() @parametrize async def test_method_create_with_all_params_overload_2(self, async_client: AsyncOpenAI) -> None: completion_stream = await async_client.completions.create( model="string", prompt="This is a test.", stream=True, best_of=0, echo=True, frequency_penalty=-2, logit_bias={"foo": 0}, logprobs=0, max_tokens=16, n=1, presence_penalty=-2, seed=-9223372036854776000, stop="\n", suffix="test.", temperature=1, top_p=1, user="user-1234", ) await completion_stream.response.aclose() @parametrize async def test_raw_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: response = await async_client.completions.with_raw_response.create( model="string", prompt="This is a test.", stream=True, ) assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = response.parse() await stream.close() @parametrize async def test_streaming_response_create_overload_2(self, async_client: AsyncOpenAI) -> None: async with async_client.completions.with_streaming_response.create( model="string", prompt="This is a test.", stream=True, ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" stream = await response.parse() await stream.close() assert cast(Any, response.is_closed) is True 
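def _example_streaming_completion() -> None:
    # Hedged illustrative sketch (editor's addition, never collected or called by
    # pytest): how the stream=True overloads exercised above are consumed in
    # application code. Assumes OPENAI_API_KEY is set; the model name is only an
    # example, not something the generated tests depend on.
    from openai import OpenAI

    client = OpenAI()
    stream = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="This is a test.",
        stream=True,
    )
    for chunk in stream:
        # Each chunk is a Completion object carrying a partial text delta.
        print(chunk.choices[0].text, end="", flush=True)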
openai-python-1.12.0/tests/api_resources/test_embeddings.py000066400000000000000000000105651456126711000241160ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import CreateEmbeddingResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestEmbeddings: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: embedding = client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", ) assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: embedding = client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", dimensions=1, encoding_format="float", user="user-1234", ) assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.embeddings.with_raw_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" embedding = response.parse() assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.embeddings.with_streaming_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" embedding = response.parse() assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncEmbeddings: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: embedding = await async_client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", ) assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: embedding = await async_client.embeddings.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", dimensions=1, encoding_format="float", user="user-1234", ) assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.embeddings.with_raw_response.create( input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" embedding = response.parse() assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.embeddings.with_streaming_response.create( 
input="The quick brown fox jumped over the lazy dog", model="text-embedding-3-small", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" embedding = await response.parse() assert_matches_type(CreateEmbeddingResponse, embedding, path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/test_files.py000066400000000000000000000454701456126711000231220ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import httpx import pytest from respx import MockRouter import openai._legacy_response as _legacy_response from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import FileObject, FileDeleted from openai.pagination import SyncPage, AsyncPage # pyright: reportDeprecated=false base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestFiles: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: file = client.files.create( file=b"raw file contents", purpose="fine-tune", ) assert_matches_type(FileObject, file, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.files.with_raw_response.create( file=b"raw file contents", purpose="fine-tune", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileObject, file, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.files.with_streaming_response.create( file=b"raw file contents", purpose="fine-tune", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileObject, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_retrieve(self, client: OpenAI) -> None: file = client.files.retrieve( "string", ) assert_matches_type(FileObject, file, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.files.with_raw_response.retrieve( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileObject, file, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.files.with_streaming_response.retrieve( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileObject, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.files.with_raw_response.retrieve( "", ) @parametrize def test_method_list(self, client: OpenAI) -> None: file = client.files.list() assert_matches_type(SyncPage[FileObject], file, path=["response"]) @parametrize def test_method_list_with_all_params(self, client: OpenAI) -> None: file = client.files.list( purpose="string", ) assert_matches_type(SyncPage[FileObject], file, 
path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.files.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(SyncPage[FileObject], file, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.files.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(SyncPage[FileObject], file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_delete(self, client: OpenAI) -> None: file = client.files.delete( "string", ) assert_matches_type(FileDeleted, file, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.files.with_raw_response.delete( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileDeleted, file, path=["response"]) @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.files.with_streaming_response.delete( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileDeleted, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.files.with_raw_response.delete( "", ) @parametrize @pytest.mark.respx(base_url=base_url) def test_method_content(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) file = client.files.content( "string", ) assert isinstance(file, _legacy_response.HttpxBinaryResponseContent) assert file.json() == {"foo": "bar"} @parametrize @pytest.mark.respx(base_url=base_url) def test_raw_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = client.files.with_raw_response.content( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(_legacy_response.HttpxBinaryResponseContent, file, path=["response"]) @parametrize @pytest.mark.respx(base_url=base_url) def test_streaming_response_content(self, client: OpenAI, respx_mock: MockRouter) -> None: respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) with client.files.with_streaming_response.content( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(bytes, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize @pytest.mark.respx(base_url=base_url) def test_path_params_content(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.files.with_raw_response.content( "", ) @parametrize def test_method_retrieve_content(self, 
client: OpenAI) -> None: with pytest.warns(DeprecationWarning): file = client.files.retrieve_content( "string", ) assert_matches_type(str, file, path=["response"]) @parametrize def test_raw_response_retrieve_content(self, client: OpenAI) -> None: with pytest.warns(DeprecationWarning): response = client.files.with_raw_response.retrieve_content( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(str, file, path=["response"]) @parametrize def test_streaming_response_retrieve_content(self, client: OpenAI) -> None: with pytest.warns(DeprecationWarning): with client.files.with_streaming_response.retrieve_content( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(str, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve_content(self, client: OpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): client.files.with_raw_response.retrieve_content( "", ) class TestAsyncFiles: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.create( file=b"raw file contents", purpose="fine-tune", ) assert_matches_type(FileObject, file, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.create( file=b"raw file contents", purpose="fine-tune", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileObject, file, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.files.with_streaming_response.create( file=b"raw file contents", purpose="fine-tune", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(FileObject, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.retrieve( "string", ) assert_matches_type(FileObject, file, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.retrieve( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileObject, file, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.files.with_streaming_response.retrieve( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(FileObject, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, 
match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.files.with_raw_response.retrieve( "", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.list() assert_matches_type(AsyncPage[FileObject], file, path=["response"]) @parametrize async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.list( purpose="string", ) assert_matches_type(AsyncPage[FileObject], file, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(AsyncPage[FileObject], file, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.files.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(AsyncPage[FileObject], file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: file = await async_client.files.delete( "string", ) assert_matches_type(FileDeleted, file, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.files.with_raw_response.delete( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(FileDeleted, file, path=["response"]) @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.files.with_streaming_response.delete( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(FileDeleted, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.files.with_raw_response.delete( "", ) @parametrize @pytest.mark.respx(base_url=base_url) async def test_method_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) file = await async_client.files.content( "string", ) assert isinstance(file, _legacy_response.HttpxBinaryResponseContent) assert file.json() == {"foo": "bar"} @parametrize @pytest.mark.respx(base_url=base_url) async def test_raw_response_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await async_client.files.with_raw_response.content( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(_legacy_response.HttpxBinaryResponseContent, file, path=["response"]) @parametrize @pytest.mark.respx(base_url=base_url) async def 
test_streaming_response_content(self, async_client: AsyncOpenAI, respx_mock: MockRouter) -> None: respx_mock.get("/files/string/content").mock(return_value=httpx.Response(200, json={"foo": "bar"})) async with async_client.files.with_streaming_response.content( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(bytes, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize @pytest.mark.respx(base_url=base_url) async def test_path_params_content(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.files.with_raw_response.content( "", ) @parametrize async def test_method_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): file = await async_client.files.retrieve_content( "string", ) assert_matches_type(str, file, path=["response"]) @parametrize async def test_raw_response_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): response = await async_client.files.with_raw_response.retrieve_content( "string", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = response.parse() assert_matches_type(str, file, path=["response"]) @parametrize async def test_streaming_response_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): async with async_client.files.with_streaming_response.retrieve_content( "string", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" file = await response.parse() assert_matches_type(str, file, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve_content(self, async_client: AsyncOpenAI) -> None: with pytest.warns(DeprecationWarning): with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"): await async_client.files.with_raw_response.retrieve_content( "", ) openai-python-1.12.0/tests/api_resources/test_images.py000066400000000000000000000255621456126711000232650ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
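# Editor's note (hedged sketch, not part of the Stainless-generated suite): the
# image tests below drive three endpoints -- create_variation, edit and
# generate. Typical application usage of the generate endpoint, assuming
# OPENAI_API_KEY is set, looks roughly like:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     result = client.images.generate(
#         prompt="A cute baby sea otter",
#         model="dall-e-3",
#         size="1024x1024",
#         n=1,
#     )
#     print(result.data[0].url)  # hosted URL (default response_format="url")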
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import ImagesResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestImages: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create_variation(self, client: OpenAI) -> None: image = client.images.create_variation( image=b"raw file contents", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_method_create_variation_with_all_params(self, client: OpenAI) -> None: image = client.images.create_variation( image=b"raw file contents", model="dall-e-2", n=1, response_format="url", size="1024x1024", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_raw_response_create_variation(self, client: OpenAI) -> None: response = client.images.with_raw_response.create_variation( image=b"raw file contents", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_streaming_response_create_variation(self, client: OpenAI) -> None: with client.images.with_streaming_response.create_variation( image=b"raw file contents", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_edit(self, client: OpenAI) -> None: image = client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_method_edit_with_all_params(self, client: OpenAI) -> None: image = client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", mask=b"raw file contents", model="dall-e-2", n=1, response_format="url", size="1024x1024", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_raw_response_edit(self, client: OpenAI) -> None: response = client.images.with_raw_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_streaming_response_edit(self, client: OpenAI) -> None: with client.images.with_streaming_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_generate(self, client: OpenAI) -> None: image = client.images.generate( prompt="A cute baby sea otter", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_method_generate_with_all_params(self, client: OpenAI) -> None: image = client.images.generate( prompt="A cute baby sea otter", model="dall-e-3", n=1, quality="standard", 
response_format="url", size="1024x1024", style="vivid", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_raw_response_generate(self, client: OpenAI) -> None: response = client.images.with_raw_response.generate( prompt="A cute baby sea otter", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize def test_streaming_response_generate(self, client: OpenAI) -> None: with client.images.with_streaming_response.generate( prompt="A cute baby sea otter", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncImages: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create_variation(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.create_variation( image=b"raw file contents", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_method_create_variation_with_all_params(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.create_variation( image=b"raw file contents", model="dall-e-2", n=1, response_format="url", size="1024x1024", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_raw_response_create_variation(self, async_client: AsyncOpenAI) -> None: response = await async_client.images.with_raw_response.create_variation( image=b"raw file contents", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_streaming_response_create_variation(self, async_client: AsyncOpenAI) -> None: async with async_client.images.with_streaming_response.create_variation( image=b"raw file contents", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = await response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_edit(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", mask=b"raw file contents", model="dall-e-2", n=1, response_format="url", size="1024x1024", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_raw_response_edit(self, async_client: AsyncOpenAI) -> None: response = await async_client.images.with_raw_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) 
@parametrize async def test_streaming_response_edit(self, async_client: AsyncOpenAI) -> None: async with async_client.images.with_streaming_response.edit( image=b"raw file contents", prompt="A cute baby sea otter wearing a beret", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = await response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_generate(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.generate( prompt="A cute baby sea otter", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) -> None: image = await async_client.images.generate( prompt="A cute baby sea otter", model="dall-e-3", n=1, quality="standard", response_format="url", size="1024x1024", style="vivid", user="user-1234", ) assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_raw_response_generate(self, async_client: AsyncOpenAI) -> None: response = await async_client.images.with_raw_response.generate( prompt="A cute baby sea otter", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) @parametrize async def test_streaming_response_generate(self, async_client: AsyncOpenAI) -> None: async with async_client.images.with_streaming_response.generate( prompt="A cute baby sea otter", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" image = await response.parse() assert_matches_type(ImagesResponse, image, path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/api_resources/test_models.py000066400000000000000000000207631456126711000233010ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
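# Editor's note (hedged sketch, not part of the Stainless-generated suite): the
# model-management endpoints covered below are typically used like this,
# assuming OPENAI_API_KEY is set:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#     for model in client.models.list():        # SyncPage[Model] is iterable
#         print(model.id)
#     model = client.models.retrieve("gpt-3.5-turbo")
#     # delete() only applies to models you own, e.g. fine-tuned checkpoints:
#     client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123")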
from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import Model, ModelDeleted from openai.pagination import SyncPage, AsyncPage base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestModels: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_retrieve(self, client: OpenAI) -> None: model = client.models.retrieve( "gpt-3.5-turbo", ) assert_matches_type(Model, model, path=["response"]) @parametrize def test_raw_response_retrieve(self, client: OpenAI) -> None: response = client.models.with_raw_response.retrieve( "gpt-3.5-turbo", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(Model, model, path=["response"]) @parametrize def test_streaming_response_retrieve(self, client: OpenAI) -> None: with client.models.with_streaming_response.retrieve( "gpt-3.5-turbo", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(Model, model, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_retrieve(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): client.models.with_raw_response.retrieve( "", ) @parametrize def test_method_list(self, client: OpenAI) -> None: model = client.models.list() assert_matches_type(SyncPage[Model], model, path=["response"]) @parametrize def test_raw_response_list(self, client: OpenAI) -> None: response = client.models.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(SyncPage[Model], model, path=["response"]) @parametrize def test_streaming_response_list(self, client: OpenAI) -> None: with client.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(SyncPage[Model], model, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_method_delete(self, client: OpenAI) -> None: model = client.models.delete( "ft:gpt-3.5-turbo:acemeco:suffix:abc123", ) assert_matches_type(ModelDeleted, model, path=["response"]) @parametrize def test_raw_response_delete(self, client: OpenAI) -> None: response = client.models.with_raw_response.delete( "ft:gpt-3.5-turbo:acemeco:suffix:abc123", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(ModelDeleted, model, path=["response"]) @parametrize def test_streaming_response_delete(self, client: OpenAI) -> None: with client.models.with_streaming_response.delete( "ft:gpt-3.5-turbo:acemeco:suffix:abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(ModelDeleted, model, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize def test_path_params_delete(self, client: OpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` 
but received ''"): client.models.with_raw_response.delete( "", ) class TestAsyncModels: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None: model = await async_client.models.retrieve( "gpt-3.5-turbo", ) assert_matches_type(Model, model, path=["response"]) @parametrize async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None: response = await async_client.models.with_raw_response.retrieve( "gpt-3.5-turbo", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(Model, model, path=["response"]) @parametrize async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None: async with async_client.models.with_streaming_response.retrieve( "gpt-3.5-turbo", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = await response.parse() assert_matches_type(Model, model, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None: with pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): await async_client.models.with_raw_response.retrieve( "", ) @parametrize async def test_method_list(self, async_client: AsyncOpenAI) -> None: model = await async_client.models.list() assert_matches_type(AsyncPage[Model], model, path=["response"]) @parametrize async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None: response = await async_client.models.with_raw_response.list() assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(AsyncPage[Model], model, path=["response"]) @parametrize async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None: async with async_client.models.with_streaming_response.list() as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = await response.parse() assert_matches_type(AsyncPage[Model], model, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_method_delete(self, async_client: AsyncOpenAI) -> None: model = await async_client.models.delete( "ft:gpt-3.5-turbo:acemeco:suffix:abc123", ) assert_matches_type(ModelDeleted, model, path=["response"]) @parametrize async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None: response = await async_client.models.with_raw_response.delete( "ft:gpt-3.5-turbo:acemeco:suffix:abc123", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = response.parse() assert_matches_type(ModelDeleted, model, path=["response"]) @parametrize async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None: async with async_client.models.with_streaming_response.delete( "ft:gpt-3.5-turbo:acemeco:suffix:abc123", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" model = await response.parse() assert_matches_type(ModelDeleted, model, path=["response"]) assert cast(Any, response.is_closed) is True @parametrize async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None: with 
pytest.raises(ValueError, match=r"Expected a non-empty value for `model` but received ''"): await async_client.models.with_raw_response.delete( "", ) openai-python-1.12.0/tests/api_resources/test_moderations.py000066400000000000000000000074261456126711000243430ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import os from typing import Any, cast import pytest from openai import OpenAI, AsyncOpenAI from tests.utils import assert_matches_type from openai.types import ModerationCreateResponse base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") class TestModerations: parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize def test_method_create(self, client: OpenAI) -> None: moderation = client.moderations.create( input="I want to kill them.", ) assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @parametrize def test_method_create_with_all_params(self, client: OpenAI) -> None: moderation = client.moderations.create( input="I want to kill them.", model="text-moderation-stable", ) assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @parametrize def test_raw_response_create(self, client: OpenAI) -> None: response = client.moderations.with_raw_response.create( input="I want to kill them.", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" moderation = response.parse() assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @parametrize def test_streaming_response_create(self, client: OpenAI) -> None: with client.moderations.with_streaming_response.create( input="I want to kill them.", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" moderation = response.parse() assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) assert cast(Any, response.is_closed) is True class TestAsyncModerations: parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"]) @parametrize async def test_method_create(self, async_client: AsyncOpenAI) -> None: moderation = await async_client.moderations.create( input="I want to kill them.", ) assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @parametrize async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None: moderation = await async_client.moderations.create( input="I want to kill them.", model="text-moderation-stable", ) assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @parametrize async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None: response = await async_client.moderations.with_raw_response.create( input="I want to kill them.", ) assert response.is_closed is True assert response.http_request.headers.get("X-Stainless-Lang") == "python" moderation = response.parse() assert_matches_type(ModerationCreateResponse, moderation, path=["response"]) @parametrize async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None: async with async_client.moderations.with_streaming_response.create( input="I want to kill them.", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" moderation = await response.parse() assert_matches_type(ModerationCreateResponse, moderation, 
path=["response"]) assert cast(Any, response.is_closed) is True openai-python-1.12.0/tests/conftest.py000066400000000000000000000026211456126711000177320ustar00rootroot00000000000000from __future__ import annotations import os import asyncio import logging from typing import TYPE_CHECKING, Iterator, AsyncIterator import pytest from openai import OpenAI, AsyncOpenAI if TYPE_CHECKING: from _pytest.fixtures import FixtureRequest pytest.register_assert_rewrite("tests.utils") logging.getLogger("openai").setLevel(logging.DEBUG) @pytest.fixture(scope="session") def event_loop() -> Iterator[asyncio.AbstractEventLoop]: loop = asyncio.new_event_loop() yield loop loop.close() base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") api_key = "My API Key" @pytest.fixture(scope="session") def client(request: FixtureRequest) -> Iterator[OpenAI]: strict = getattr(request, "param", True) if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") with OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: yield client @pytest.fixture(scope="session") async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncOpenAI]: strict = getattr(request, "param", True) if not isinstance(strict, bool): raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}") async with AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client: yield client openai-python-1.12.0/tests/lib/000077500000000000000000000000001456126711000163005ustar00rootroot00000000000000openai-python-1.12.0/tests/lib/test_azure.py000066400000000000000000000034001456126711000210340ustar00rootroot00000000000000from typing import Union from typing_extensions import Literal import pytest from openai._models import FinalRequestOptions from openai.lib.azure import AzureOpenAI, AsyncAzureOpenAI Client = Union[AzureOpenAI, AsyncAzureOpenAI] sync_client = AzureOpenAI( api_version="2023-07-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ) async_client = AsyncAzureOpenAI( api_version="2023-07-01", api_key="example API key", azure_endpoint="https://example-resource.azure.openai.com", ) @pytest.mark.parametrize("client", [sync_client, async_client]) def test_implicit_deployment_path(client: Client) -> None: req = client._build_request( FinalRequestOptions.construct( method="post", url="/chat/completions", json_data={"model": "my-deployment-model"}, ) ) assert ( req.url == "https://example-resource.azure.openai.com/openai/deployments/my-deployment-model/chat/completions?api-version=2023-07-01" ) @pytest.mark.parametrize( "client,method", [ (sync_client, "copy"), (sync_client, "with_options"), (async_client, "copy"), (async_client, "with_options"), ], ) def test_client_copying(client: Client, method: Literal["copy", "with_options"]) -> None: if method == "copy": copied = client.copy() else: copied = client.with_options() assert copied._custom_query == {"api-version": "2023-07-01"} @pytest.mark.parametrize( "client", [sync_client, async_client], ) def test_client_copying_override_options(client: Client) -> None: copied = client.copy( api_version="2022-05-01", ) assert copied._custom_query == {"api-version": "2022-05-01"} openai-python-1.12.0/tests/lib/test_old_api.py000066400000000000000000000006501456126711000213210ustar00rootroot00000000000000import pytest import openai from openai.lib._old_api import APIRemovedInV1 def 
test_basic_attribute_access_works() -> None: for attr in dir(openai): dir(getattr(openai, attr)) def test_helpful_error_is_raised() -> None: with pytest.raises(APIRemovedInV1): openai.Completion.create() # type: ignore with pytest.raises(APIRemovedInV1): openai.ChatCompletion.create() # type: ignore openai-python-1.12.0/tests/test_client.py000066400000000000000000001575361456126711000204420ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. from __future__ import annotations import gc import os import json import asyncio import inspect import tracemalloc from typing import Any, Union, cast from unittest import mock import httpx import pytest from respx import MockRouter from pydantic import ValidationError from openai import OpenAI, AsyncOpenAI, APIResponseValidationError from openai._client import OpenAI, AsyncOpenAI from openai._models import BaseModel, FinalRequestOptions from openai._constants import RAW_RESPONSE_HEADER from openai._streaming import Stream, AsyncStream from openai._exceptions import OpenAIError, APIStatusError, APITimeoutError, APIResponseValidationError from openai._base_client import DEFAULT_TIMEOUT, HTTPX_DEFAULT_TIMEOUT, BaseClient, make_request_options from .utils import update_env base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010") api_key = "My API Key" def _get_params(client: BaseClient[Any, Any]) -> dict[str, str]: request = client._build_request(FinalRequestOptions(method="get", url="/foo")) url = httpx.URL(request.url) return dict(url.params) def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float: return 0.1 def _get_open_connections(client: OpenAI | AsyncOpenAI) -> int: transport = client._client._transport assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport) pool = transport._pool return len(pool._requests) class TestOpenAI: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) def test_raw_response(self, respx_mock: MockRouter) -> None: respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = self.client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} @pytest.mark.respx(base_url=base_url) def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: respx_mock.post("/foo").mock( return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') ) response = self.client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} def test_copy(self) -> None: copied = self.client.copy() assert id(copied) != id(self.client) copied = self.client.copy(api_key="another My API Key") assert copied.api_key == "another My API Key" assert self.client.api_key == "My API Key" def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) assert copied.max_retries == 7 assert self.client.max_retries == 2 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 assert copied.max_retries == 7 # timeout assert isinstance(self.client.timeout, httpx.Timeout) copied = self.client.copy(timeout=None) assert copied.timeout is None assert isinstance(self.client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: 
client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) assert client.default_headers["X-Foo"] == "bar" # does not override the already given value when not specified copied = client.copy() assert copied.default_headers["X-Foo"] == "bar" # merges already given headers copied = client.copy(default_headers={"X-Bar": "stainless"}) assert copied.default_headers["X-Foo"] == "bar" assert copied.default_headers["X-Bar"] == "stainless" # uses new values for any already given headers copied = client.copy(default_headers={"X-Foo": "stainless"}) assert copied.default_headers["X-Foo"] == "stainless" # set_default_headers # completely overrides already set values copied = client.copy(set_default_headers={}) assert copied.default_headers.get("X-Foo") is None copied = client.copy(set_default_headers={"X-Bar": "Robert"}) assert copied.default_headers["X-Bar"] == "Robert" with pytest.raises( ValueError, match="`default_headers` and `set_default_headers` arguments are mutually exclusive", ): client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) def test_copy_default_query(self) -> None: client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" # does not override the already given value when not specified copied = client.copy() assert _get_params(copied)["foo"] == "bar" # merges already given params copied = client.copy(default_query={"bar": "stainless"}) params = _get_params(copied) assert params["foo"] == "bar" assert params["bar"] == "stainless" # uses new values for any already given headers copied = client.copy(default_query={"foo": "stainless"}) assert _get_params(copied)["foo"] == "stainless" # set_default_query # completely overrides already set values copied = client.copy(set_default_query={}) assert _get_params(copied) == {} copied = client.copy(set_default_query={"bar": "Robert"}) assert _get_params(copied)["bar"] == "Robert" with pytest.raises( ValueError, # TODO: update match="`default_query` and `set_default_query` arguments are mutually exclusive", ): client.copy(set_default_query={}, default_query={"foo": "Bar"}) def test_copy_signature(self) -> None: # ensure the same parameters that can be passed to the client are defined in the `.copy()` method init_signature = inspect.signature( # mypy doesn't like that we access the `__init__` property. self.client.__init__, # type: ignore[misc] ) copy_signature = inspect.signature(self.client.copy) exclude_params = {"transport", "proxies", "_strict_response_validation"} for name in init_signature.parameters.keys(): if name in exclude_params: continue copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") def build_request(options: FinalRequestOptions) -> None: client = self.client.copy() client._build_request(options) # ensure that the machinery is warmed up before tracing starts. build_request(options) gc.collect() tracemalloc.start(1000) snapshot_before = tracemalloc.take_snapshot() ITERATIONS = 10 for _ in range(ITERATIONS): build_request(options) gc.collect() snapshot_after = tracemalloc.take_snapshot() tracemalloc.stop() def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: if diff.count == 0: # Avoid false positives by considering only leaks (i.e. 
allocations that persist). return if diff.count % ITERATIONS != 0: # Avoid false positives by considering only leaks that appear per iteration. return for frame in diff.traceback: if any( frame.filename.endswith(fragment) for fragment in [ # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. "openai/_legacy_response.py", "openai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. "openai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] ): return leaks.append(diff) leaks: list[tracemalloc.StatisticDiff] = [] for diff in snapshot_after.compare_to(snapshot_before, "traceback"): add_leak(leaks, diff) if leaks: for leak in leaks: print("MEMORY LEAK:", leak) for frame in leak.traceback: print(frame) raise AssertionError() def test_request_timeout(self) -> None: request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT request = self.client._build_request( FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(100.0) def test_client_timeout_option(self) -> None: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0)) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(0) def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default def test_default_headers_option(self) -> None: client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" client2 = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={ "X-Foo": "stainless", "X-Stainless-Lang": "my-overriding-header", 
}, ) request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(OpenAIError): client2 = OpenAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: client = OpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) url = httpx.URL(request.url) assert dict(url.params) == {"query_param": "bar"} request = client._build_request( FinalRequestOptions( method="get", url="/foo", params={"foo": "baz", "query_param": "overriden"}, ) ) url = httpx.URL(request.url) assert dict(url.params) == {"foo": "baz", "query_param": "overriden"} def test_request_extra_json(self) -> None: request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar"}, extra_json={"baz": False}, ), ) data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": False} request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", extra_json={"baz": False}, ), ) data = json.loads(request.content.decode("utf-8")) assert data == {"baz": False} # `extra_json` takes priority over `json_data` when keys clash request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar", "baz": True}, extra_json={"baz": None}, ), ) data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": None} def test_request_extra_headers(self) -> None: request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options(extra_headers={"X-Foo": "Foo"}), ), ) assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( extra_headers={"X-Bar": "false"}, ), ), ) assert request.headers.get("X-Bar") == "false" def test_request_extra_query(self) -> None: request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( extra_query={"my_query_param": "Foo"}, ), ), ) params = dict(request.url.params) assert params == {"my_query_param": "Foo"} # if both `query` and `extra_query` are given, they are merged request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( query={"bar": "1"}, extra_query={"foo": "2"}, ), ), ) params = dict(request.url.params) assert params == {"bar": "1", "foo": "2"} # `extra_query` takes priority over `query` when keys clash request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( query={"foo": "1"}, extra_query={"foo": "2"}, ), ), ) params = dict(request.url.params) assert params == {"foo": "2"} def test_multipart_repeating_array(self, client: OpenAI) -> None: request = client._build_request( FinalRequestOptions.construct( method="get", url="/foo", 
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, files=[("foo.txt", b"hello world")], ) ) assert request.read().split(b"\r\n") == [ b"--6b7ba517decee4a450543ea6ae821c82", b'Content-Disposition: form-data; name="array[]"', b"", b"foo", b"--6b7ba517decee4a450543ea6ae821c82", b'Content-Disposition: form-data; name="array[]"', b"", b"bar", b"--6b7ba517decee4a450543ea6ae821c82", b'Content-Disposition: form-data; name="foo.txt"; filename="upload"', b"Content-Type: application/octet-stream", b"", b"hello world", b"--6b7ba517decee4a450543ea6ae821c82--", b"", ] @pytest.mark.respx(base_url=base_url) def test_basic_union_response(self, respx_mock: MockRouter) -> None: class Model1(BaseModel): name: str class Model2(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" @pytest.mark.respx(base_url=base_url) def test_union_response_different_types(self, respx_mock: MockRouter) -> None: """Union of objects with the same field name using a different type""" class Model1(BaseModel): foo: int class Model2(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ class Model(BaseModel): foo: int respx_mock.get("/foo").mock( return_value=httpx.Response( 200, content=json.dumps({"foo": 2}), headers={"Content-Type": "application/text"}, ) ) response = self.client.get("/foo", cast_to=Model) assert isinstance(response, Model) assert response.foo == 2 def test_base_url_setter(self) -> None: client = OpenAI(base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True) assert client.base_url == "https://example.com/from_init/" client.base_url = "https://example.com/from_setter" # type: ignore[assignment] assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: with update_env(OPENAI_BASE_URL="http://localhost:5000/from/env"): client = OpenAI(api_key=api_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ OpenAI(base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True), OpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, http_client=httpx.Client(), ), ], ids=["standard", "custom http client"], ) def test_base_url_trailing_slash(self, client: OpenAI) -> None: request = client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar"}, ), ) assert request.url == "http://localhost:5000/custom/path/foo" @pytest.mark.parametrize( "client", [ OpenAI(base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True), 
OpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, http_client=httpx.Client(), ), ], ids=["standard", "custom http client"], ) def test_base_url_no_trailing_slash(self, client: OpenAI) -> None: request = client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar"}, ), ) assert request.url == "http://localhost:5000/custom/path/foo" @pytest.mark.parametrize( "client", [ OpenAI(base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True), OpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, http_client=httpx.Client(), ), ], ids=["standard", "custom http client"], ) def test_absolute_request_url(self, client: OpenAI) -> None: request = client._build_request( FinalRequestOptions( method="post", url="https://myapi.com/foo", json_data={"foo": "bar"}, ), ) assert request.url == "https://myapi.com/foo" def test_copied_client_does_not_close_http(self) -> None: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() assert copied is not client del copied assert not client.is_closed() def test_client_context_manager(self) -> None: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with client as c2: assert c2 is client assert not c2.is_closed() assert not client.is_closed() assert client.is_closed() @pytest.mark.respx(base_url=base_url) def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: class Model(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) with pytest.raises(APIResponseValidationError) as exc: self.client.get("/foo", cast_to=Model) assert isinstance(exc.value.__cause__, ValidationError) @pytest.mark.respx(base_url=base_url) def test_default_stream_cls(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) stream = self.client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model]) assert isinstance(stream, Stream) stream.response.close() @pytest.mark.respx(base_url=base_url) def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) strict_client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) response = client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @pytest.mark.parametrize( "remaining_retries,retry_after,timeout", [ [3, "20", 20], [3, "0", 0.5], [3, "-10", 0.5], [3, "60", 60], [3, "61", 0.5], [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], [3, "Fri, 29 Sep 2023 16:26:27 GMT", 0.5], [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], [3, "99999999999999999999999999999999999", 0.5], [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], [3, "", 0.5], [2, "", 0.5 * 2.0], [1, "", 0.5 * 4.0], ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: 
float) -> None: client = OpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): self.client.post( "/chat/completions", body=cast( object, dict( messages=[ { "role": "user", "content": "Say this is a test", } ], model="gpt-3.5-turbo", ), ), cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) assert _get_open_connections(self.client) == 0 @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): self.client.post( "/chat/completions", body=cast( object, dict( messages=[ { "role": "user", "content": "Say this is a test", } ], model="gpt-3.5-turbo", ), ), cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) assert _get_open_connections(self.client) == 0 class TestAsyncOpenAI: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_raw_response(self, respx_mock: MockRouter) -> None: respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await self.client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: respx_mock.post("/foo").mock( return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') ) response = await self.client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} def test_copy(self) -> None: copied = self.client.copy() assert id(copied) != id(self.client) copied = self.client.copy(api_key="another My API Key") assert copied.api_key == "another My API Key" assert self.client.api_key == "My API Key" def test_copy_default_options(self) -> None: # options that have a default are overridden correctly copied = self.client.copy(max_retries=7) assert copied.max_retries == 7 assert self.client.max_retries == 2 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 assert copied.max_retries == 7 # timeout assert isinstance(self.client.timeout, httpx.Timeout) copied = self.client.copy(timeout=None) assert copied.timeout is None assert isinstance(self.client.timeout, httpx.Timeout) def test_copy_default_headers(self) -> None: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) assert 
client.default_headers["X-Foo"] == "bar" # does not override the already given value when not specified copied = client.copy() assert copied.default_headers["X-Foo"] == "bar" # merges already given headers copied = client.copy(default_headers={"X-Bar": "stainless"}) assert copied.default_headers["X-Foo"] == "bar" assert copied.default_headers["X-Bar"] == "stainless" # uses new values for any already given headers copied = client.copy(default_headers={"X-Foo": "stainless"}) assert copied.default_headers["X-Foo"] == "stainless" # set_default_headers # completely overrides already set values copied = client.copy(set_default_headers={}) assert copied.default_headers.get("X-Foo") is None copied = client.copy(set_default_headers={"X-Bar": "Robert"}) assert copied.default_headers["X-Bar"] == "Robert" with pytest.raises( ValueError, match="`default_headers` and `set_default_headers` arguments are mutually exclusive", ): client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) def test_copy_default_query(self) -> None: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"} ) assert _get_params(client)["foo"] == "bar" # does not override the already given value when not specified copied = client.copy() assert _get_params(copied)["foo"] == "bar" # merges already given params copied = client.copy(default_query={"bar": "stainless"}) params = _get_params(copied) assert params["foo"] == "bar" assert params["bar"] == "stainless" # uses new values for any already given headers copied = client.copy(default_query={"foo": "stainless"}) assert _get_params(copied)["foo"] == "stainless" # set_default_query # completely overrides already set values copied = client.copy(set_default_query={}) assert _get_params(copied) == {} copied = client.copy(set_default_query={"bar": "Robert"}) assert _get_params(copied)["bar"] == "Robert" with pytest.raises( ValueError, # TODO: update match="`default_query` and `set_default_query` arguments are mutually exclusive", ): client.copy(set_default_query={}, default_query={"foo": "Bar"}) def test_copy_signature(self) -> None: # ensure the same parameters that can be passed to the client are defined in the `.copy()` method init_signature = inspect.signature( # mypy doesn't like that we access the `__init__` property. self.client.__init__, # type: ignore[misc] ) copy_signature = inspect.signature(self.client.copy) exclude_params = {"transport", "proxies", "_strict_response_validation"} for name in init_signature.parameters.keys(): if name in exclude_params: continue copy_param = copy_signature.parameters.get(name) assert copy_param is not None, f"copy() signature is missing the {name} param" def test_copy_build_request(self) -> None: options = FinalRequestOptions(method="get", url="/foo") def build_request(options: FinalRequestOptions) -> None: client = self.client.copy() client._build_request(options) # ensure that the machinery is warmed up before tracing starts. build_request(options) gc.collect() tracemalloc.start(1000) snapshot_before = tracemalloc.take_snapshot() ITERATIONS = 10 for _ in range(ITERATIONS): build_request(options) gc.collect() snapshot_after = tracemalloc.take_snapshot() tracemalloc.stop() def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None: if diff.count == 0: # Avoid false positives by considering only leaks (i.e. allocations that persist). 
return if diff.count % ITERATIONS != 0: # Avoid false positives by considering only leaks that appear per iteration. return for frame in diff.traceback: if any( frame.filename.endswith(fragment) for fragment in [ # to_raw_response_wrapper leaks through the @functools.wraps() decorator. # # removing the decorator fixes the leak for reasons we don't understand. "openai/_legacy_response.py", "openai/_response.py", # pydantic.BaseModel.model_dump || pydantic.BaseModel.dict leak memory for some reason. "openai/_compat.py", # Standard library leaks we don't care about. "/logging/__init__.py", ] ): return leaks.append(diff) leaks: list[tracemalloc.StatisticDiff] = [] for diff in snapshot_after.compare_to(snapshot_before, "traceback"): add_leak(leaks, diff) if leaks: for leak in leaks: print("MEMORY LEAK:", leak) for frame in leak.traceback: print(frame) raise AssertionError() async def test_request_timeout(self) -> None: request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT request = self.client._build_request( FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(100.0) async def test_client_timeout_option(self) -> None: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, timeout=httpx.Timeout(0) ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(0) async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, http_client=http_client ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default def test_default_headers_option(self) -> None: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" client2 = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={ "X-Foo": 
"stainless", "X-Stainless-Lang": "my-overriding-header", }, ) request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" def test_validate_headers(self) -> None: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("Authorization") == f"Bearer {api_key}" with pytest.raises(OpenAIError): client2 = AsyncOpenAI(base_url=base_url, api_key=None, _strict_response_validation=True) _ = client2 def test_default_query_option(self) -> None: client = AsyncOpenAI( base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"} ) request = client._build_request(FinalRequestOptions(method="get", url="/foo")) url = httpx.URL(request.url) assert dict(url.params) == {"query_param": "bar"} request = client._build_request( FinalRequestOptions( method="get", url="/foo", params={"foo": "baz", "query_param": "overriden"}, ) ) url = httpx.URL(request.url) assert dict(url.params) == {"foo": "baz", "query_param": "overriden"} def test_request_extra_json(self) -> None: request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar"}, extra_json={"baz": False}, ), ) data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": False} request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", extra_json={"baz": False}, ), ) data = json.loads(request.content.decode("utf-8")) assert data == {"baz": False} # `extra_json` takes priority over `json_data` when keys clash request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar", "baz": True}, extra_json={"baz": None}, ), ) data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": None} def test_request_extra_headers(self) -> None: request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options(extra_headers={"X-Foo": "Foo"}), ), ) assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( extra_headers={"X-Bar": "false"}, ), ), ) assert request.headers.get("X-Bar") == "false" def test_request_extra_query(self) -> None: request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( extra_query={"my_query_param": "Foo"}, ), ), ) params = dict(request.url.params) assert params == {"my_query_param": "Foo"} # if both `query` and `extra_query` are given, they are merged request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( query={"bar": "1"}, extra_query={"foo": "2"}, ), ), ) params = dict(request.url.params) assert params == {"bar": "1", "foo": "2"} # `extra_query` takes priority over `query` when keys clash request = self.client._build_request( FinalRequestOptions( method="post", url="/foo", **make_request_options( query={"foo": "1"}, extra_query={"foo": "2"}, ), ), ) params = dict(request.url.params) assert params == {"foo": "2"} def test_multipart_repeating_array(self, async_client: AsyncOpenAI) -> None: request = 
async_client._build_request( FinalRequestOptions.construct( method="get", url="/foo", headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"}, json_data={"array": ["foo", "bar"]}, files=[("foo.txt", b"hello world")], ) ) assert request.read().split(b"\r\n") == [ b"--6b7ba517decee4a450543ea6ae821c82", b'Content-Disposition: form-data; name="array[]"', b"", b"foo", b"--6b7ba517decee4a450543ea6ae821c82", b'Content-Disposition: form-data; name="array[]"', b"", b"bar", b"--6b7ba517decee4a450543ea6ae821c82", b'Content-Disposition: form-data; name="foo.txt"; filename="upload"', b"Content-Type: application/octet-stream", b"", b"hello world", b"--6b7ba517decee4a450543ea6ae821c82--", b"", ] @pytest.mark.respx(base_url=base_url) async def test_basic_union_response(self, respx_mock: MockRouter) -> None: class Model1(BaseModel): name: str class Model2(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" @pytest.mark.respx(base_url=base_url) async def test_union_response_different_types(self, respx_mock: MockRouter) -> None: """Union of objects with the same field name using a different type""" class Model1(BaseModel): foo: int class Model2(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ class Model(BaseModel): foo: int respx_mock.get("/foo").mock( return_value=httpx.Response( 200, content=json.dumps({"foo": 2}), headers={"Content-Type": "application/text"}, ) ) response = await self.client.get("/foo", cast_to=Model) assert isinstance(response, Model) assert response.foo == 2 def test_base_url_setter(self) -> None: client = AsyncOpenAI( base_url="https://example.com/from_init", api_key=api_key, _strict_response_validation=True ) assert client.base_url == "https://example.com/from_init/" client.base_url = "https://example.com/from_setter" # type: ignore[assignment] assert client.base_url == "https://example.com/from_setter/" def test_base_url_env(self) -> None: with update_env(OPENAI_BASE_URL="http://localhost:5000/from/env"): client = AsyncOpenAI(api_key=api_key, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @pytest.mark.parametrize( "client", [ AsyncOpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), AsyncOpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), ], ids=["standard", "custom http client"], ) def test_base_url_trailing_slash(self, client: AsyncOpenAI) -> None: request = client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar"}, ), ) assert request.url == 
"http://localhost:5000/custom/path/foo" @pytest.mark.parametrize( "client", [ AsyncOpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), AsyncOpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), ], ids=["standard", "custom http client"], ) def test_base_url_no_trailing_slash(self, client: AsyncOpenAI) -> None: request = client._build_request( FinalRequestOptions( method="post", url="/foo", json_data={"foo": "bar"}, ), ) assert request.url == "http://localhost:5000/custom/path/foo" @pytest.mark.parametrize( "client", [ AsyncOpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True ), AsyncOpenAI( base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True, http_client=httpx.AsyncClient(), ), ], ids=["standard", "custom http client"], ) def test_absolute_request_url(self, client: AsyncOpenAI) -> None: request = client._build_request( FinalRequestOptions( method="post", url="https://myapi.com/foo", json_data={"foo": "bar"}, ), ) assert request.url == "https://myapi.com/foo" async def test_copied_client_does_not_close_http(self) -> None: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) assert not client.is_closed() copied = client.copy() assert copied is not client del copied await asyncio.sleep(0.2) assert not client.is_closed() async def test_client_context_manager(self) -> None: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) async with client as c2: assert c2 is client assert not c2.is_closed() assert not client.is_closed() assert client.is_closed() @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: class Model(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) with pytest.raises(APIResponseValidationError) as exc: await self.client.get("/foo", cast_to=Model) assert isinstance(exc.value.__cause__, ValidationError) @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_default_stream_cls(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) stream = await self.client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model]) assert isinstance(stream, AsyncStream) await stream.response.aclose() @pytest.mark.respx(base_url=base_url) @pytest.mark.asyncio async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format")) strict_client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=False) response = await client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] @pytest.mark.parametrize( "remaining_retries,retry_after,timeout", [ [3, "20", 20], [3, "0", 0.5], [3, "-10", 0.5], [3, "60", 60], [3, "61", 0.5], [3, "Fri, 29 Sep 2023 16:26:57 GMT", 20], [3, "Fri, 29 Sep 2023 16:26:37 GMT", 0.5], [3, "Fri, 29 Sep 2023 
16:26:27 GMT", 0.5], [3, "Fri, 29 Sep 2023 16:27:37 GMT", 60], [3, "Fri, 29 Sep 2023 16:27:38 GMT", 0.5], [3, "99999999999999999999999999999999999", 0.5], [3, "Zun, 29 Sep 2023 16:26:27 GMT", 0.5], [3, "", 0.5], [2, "", 0.5 * 2.0], [1, "", 0.5 * 4.0], ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) @pytest.mark.asyncio async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: client = AsyncOpenAI(base_url=base_url, api_key=api_key, _strict_response_validation=True) headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType] @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error")) with pytest.raises(APITimeoutError): await self.client.post( "/chat/completions", body=cast( object, dict( messages=[ { "role": "user", "content": "Say this is a test", } ], model="gpt-3.5-turbo", ), ), cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) assert _get_open_connections(self.client) == 0 @mock.patch("openai._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None: respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500)) with pytest.raises(APIStatusError): await self.client.post( "/chat/completions", body=cast( object, dict( messages=[ { "role": "user", "content": "Say this is a test", } ], model="gpt-3.5-turbo", ), ), cast_to=httpx.Response, options={"headers": {RAW_RESPONSE_HEADER: "stream"}}, ) assert _get_open_connections(self.client) == 0 openai-python-1.12.0/tests/test_deepcopy.py000066400000000000000000000030341456126711000207530ustar00rootroot00000000000000from openai._utils import deepcopy_minimal def assert_different_identities(obj1: object, obj2: object) -> None: assert obj1 == obj2 assert id(obj1) != id(obj2) def test_simple_dict() -> None: obj1 = {"foo": "bar"} obj2 = deepcopy_minimal(obj1) assert_different_identities(obj1, obj2) def test_nested_dict() -> None: obj1 = {"foo": {"bar": True}} obj2 = deepcopy_minimal(obj1) assert_different_identities(obj1, obj2) assert_different_identities(obj1["foo"], obj2["foo"]) def test_complex_nested_dict() -> None: obj1 = {"foo": {"bar": [{"hello": "world"}]}} obj2 = deepcopy_minimal(obj1) assert_different_identities(obj1, obj2) assert_different_identities(obj1["foo"], obj2["foo"]) assert_different_identities(obj1["foo"]["bar"], obj2["foo"]["bar"]) assert_different_identities(obj1["foo"]["bar"][0], obj2["foo"]["bar"][0]) def test_simple_list() -> None: obj1 = ["a", "b", "c"] obj2 = deepcopy_minimal(obj1) assert_different_identities(obj1, obj2) def test_nested_list() -> None: obj1 = ["a", [1, 2, 3]] obj2 = deepcopy_minimal(obj1) assert_different_identities(obj1, obj2) assert_different_identities(obj1[1], obj2[1]) class MyObject: ... 
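# (Added sketch, not part of the generated suite; the helper name below is hypothetical.)
# It restates the behaviour the surrounding tests exercise: deepcopy_minimal copies dicts
# and lists recursively, but returns every other object (custom classes, tuples, bytes, ...)
# by identity instead of copying it.
def _sketch_deepcopy_minimal_behaviour() -> None:
    marker = MyObject()
    original = {"nested": [1, {"keep": marker}]}
    copied = deepcopy_minimal(original)
    # the dict/list containers are fresh objects ...
    assert copied == original
    assert copied is not original
    assert copied["nested"] is not original["nested"]
    # ... while non-container leaves are shared rather than copied
    assert copied["nested"][1]["keep"] is marker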
def test_ignores_other_types() -> None: # custom classes my_obj = MyObject() obj1 = {"foo": my_obj} obj2 = deepcopy_minimal(obj1) assert_different_identities(obj1, obj2) assert obj1["foo"] is my_obj # tuples obj3 = ("a", "b") obj4 = deepcopy_minimal(obj3) assert obj3 is obj4 openai-python-1.12.0/tests/test_extract_files.py000066400000000000000000000036231456126711000220030ustar00rootroot00000000000000from __future__ import annotations from typing import Sequence import pytest from openai._types import FileTypes from openai._utils import extract_files def test_removes_files_from_input() -> None: query = {"foo": "bar"} assert extract_files(query, paths=[]) == [] assert query == {"foo": "bar"} query2 = {"foo": b"Bar", "hello": "world"} assert extract_files(query2, paths=[["foo"]]) == [("foo", b"Bar")] assert query2 == {"hello": "world"} query3 = {"foo": {"foo": {"bar": b"Bar"}}, "hello": "world"} assert extract_files(query3, paths=[["foo", "foo", "bar"]]) == [("foo[foo][bar]", b"Bar")] assert query3 == {"foo": {"foo": {}}, "hello": "world"} query4 = {"foo": {"bar": b"Bar", "baz": "foo"}, "hello": "world"} assert extract_files(query4, paths=[["foo", "bar"]]) == [("foo[bar]", b"Bar")] assert query4 == {"hello": "world", "foo": {"baz": "foo"}} def test_multiple_files() -> None: query = {"documents": [{"file": b"My first file"}, {"file": b"My second file"}]} assert extract_files(query, paths=[["documents", "", "file"]]) == [ ("documents[][file]", b"My first file"), ("documents[][file]", b"My second file"), ] assert query == {"documents": [{}, {}]} @pytest.mark.parametrize( "query,paths,expected", [ [ {"foo": {"bar": "baz"}}, [["foo", "", "bar"]], [], ], [ {"foo": ["bar", "baz"]}, [["foo", "bar"]], [], ], [ {"foo": {"bar": "baz"}}, [["foo", "foo"]], [], ], ], ids=["dict expecting array", "array expecting dict", "unknown keys"], ) def test_ignores_incorrect_paths( query: dict[str, object], paths: Sequence[Sequence[str]], expected: list[tuple[str, FileTypes]], ) -> None: assert extract_files(query, paths=paths) == expected openai-python-1.12.0/tests/test_files.py000066400000000000000000000030501456126711000202430ustar00rootroot00000000000000from pathlib import Path import anyio import pytest from dirty_equals import IsDict, IsList, IsBytes, IsTuple from openai._files import to_httpx_files, async_to_httpx_files readme_path = Path(__file__).parent.parent.joinpath("README.md") def test_pathlib_includes_file_name() -> None: result = to_httpx_files({"file": readme_path}) print(result) assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) def test_tuple_input() -> None: result = to_httpx_files([("file", readme_path)]) print(result) assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes()))) @pytest.mark.asyncio async def test_async_pathlib_includes_file_name() -> None: result = await async_to_httpx_files({"file": readme_path}) print(result) assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) @pytest.mark.asyncio async def test_async_supports_anyio_path() -> None: result = await async_to_httpx_files({"file": anyio.Path(readme_path)}) print(result) assert result == IsDict({"file": IsTuple("README.md", IsBytes())}) @pytest.mark.asyncio async def test_async_tuple_input() -> None: result = await async_to_httpx_files([("file", readme_path)]) print(result) assert result == IsList(IsTuple("file", IsTuple("README.md", IsBytes()))) def test_string_not_allowed() -> None: with pytest.raises(TypeError, match="Expected file types input to be a FileContent type or to be a 
tuple"): to_httpx_files( { "file": "foo", # type: ignore } ) openai-python-1.12.0/tests/test_legacy_response.py000066400000000000000000000032561456126711000223330ustar00rootroot00000000000000import json import httpx import pytest import pydantic from openai import OpenAI, BaseModel from openai._streaming import Stream from openai._base_client import FinalRequestOptions from openai._legacy_response import LegacyAPIResponse class PydanticModel(pydantic.BaseModel): ... def test_response_parse_mismatched_basemodel(client: OpenAI) -> None: response = LegacyAPIResponse( raw=httpx.Response(200, content=b"foo"), client=client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) with pytest.raises( TypeError, match="Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`", ): response.parse(to=PydanticModel) def test_response_parse_custom_stream(client: OpenAI) -> None: response = LegacyAPIResponse( raw=httpx.Response(200, content=b"foo"), client=client, stream=True, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) stream = response.parse(to=Stream[int]) assert stream._cast_to == int class CustomModel(BaseModel): foo: str bar: int def test_response_parse_custom_model(client: OpenAI) -> None: response = LegacyAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) obj = response.parse(to=CustomModel) assert obj.foo == "hello!" assert obj.bar == 2 openai-python-1.12.0/tests/test_models.py000066400000000000000000000376361456126711000204450ustar00rootroot00000000000000import json from typing import Any, Dict, List, Union, Optional, cast from datetime import datetime, timezone from typing_extensions import Literal import pytest import pydantic from pydantic import Field from openai._compat import PYDANTIC_V2, parse_obj, model_dump, model_json from openai._models import BaseModel class BasicModel(BaseModel): foo: str @pytest.mark.parametrize("value", ["hello", 1], ids=["correct type", "mismatched"]) def test_basic(value: object) -> None: m = BasicModel.construct(foo=value) assert m.foo == value def test_directly_nested_model() -> None: class NestedModel(BaseModel): nested: BasicModel m = NestedModel.construct(nested={"foo": "Foo!"}) assert m.nested.foo == "Foo!" # mismatched types m = NestedModel.construct(nested="hello!") assert m.nested == "hello!" 
def test_optional_nested_model() -> None: class NestedModel(BaseModel): nested: Optional[BasicModel] m1 = NestedModel.construct(nested=None) assert m1.nested is None m2 = NestedModel.construct(nested={"foo": "bar"}) assert m2.nested is not None assert m2.nested.foo == "bar" # mismatched types m3 = NestedModel.construct(nested={"foo"}) assert isinstance(cast(Any, m3.nested), set) assert m3.nested == {"foo"} def test_list_nested_model() -> None: class NestedModel(BaseModel): nested: List[BasicModel] m = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) assert m.nested is not None assert isinstance(m.nested, list) assert len(m.nested) == 2 assert m.nested[0].foo == "bar" assert m.nested[1].foo == "2" # mismatched types m = NestedModel.construct(nested=True) assert cast(Any, m.nested) is True m = NestedModel.construct(nested=[False]) assert cast(Any, m.nested) == [False] def test_optional_list_nested_model() -> None: class NestedModel(BaseModel): nested: Optional[List[BasicModel]] m1 = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}]) assert m1.nested is not None assert isinstance(m1.nested, list) assert len(m1.nested) == 2 assert m1.nested[0].foo == "bar" assert m1.nested[1].foo == "2" m2 = NestedModel.construct(nested=None) assert m2.nested is None # mismatched types m3 = NestedModel.construct(nested={1}) assert cast(Any, m3.nested) == {1} m4 = NestedModel.construct(nested=[False]) assert cast(Any, m4.nested) == [False] def test_list_optional_items_nested_model() -> None: class NestedModel(BaseModel): nested: List[Optional[BasicModel]] m = NestedModel.construct(nested=[None, {"foo": "bar"}]) assert m.nested is not None assert isinstance(m.nested, list) assert len(m.nested) == 2 assert m.nested[0] is None assert m.nested[1] is not None assert m.nested[1].foo == "bar" # mismatched types m3 = NestedModel.construct(nested="foo") assert cast(Any, m3.nested) == "foo" m4 = NestedModel.construct(nested=[False]) assert cast(Any, m4.nested) == [False] def test_list_mismatched_type() -> None: class NestedModel(BaseModel): nested: List[str] m = NestedModel.construct(nested=False) assert cast(Any, m.nested) is False def test_raw_dictionary() -> None: class NestedModel(BaseModel): nested: Dict[str, str] m = NestedModel.construct(nested={"hello": "world"}) assert m.nested == {"hello": "world"} # mismatched types m = NestedModel.construct(nested=False) assert cast(Any, m.nested) is False def test_nested_dictionary_model() -> None: class NestedModel(BaseModel): nested: Dict[str, BasicModel] m = NestedModel.construct(nested={"hello": {"foo": "bar"}}) assert isinstance(m.nested, dict) assert m.nested["hello"].foo == "bar" # mismatched types m = NestedModel.construct(nested={"hello": False}) assert cast(Any, m.nested["hello"]) is False def test_unknown_fields() -> None: m1 = BasicModel.construct(foo="foo", unknown=1) assert m1.foo == "foo" assert cast(Any, m1).unknown == 1 m2 = BasicModel.construct(foo="foo", unknown={"foo_bar": True}) assert m2.foo == "foo" assert cast(Any, m2).unknown == {"foo_bar": True} assert model_dump(m2) == {"foo": "foo", "unknown": {"foo_bar": True}} def test_strict_validation_unknown_fields() -> None: class Model(BaseModel): foo: str model = parse_obj(Model, dict(foo="hello!", user="Robert")) assert model.foo == "hello!" 
assert cast(Any, model).user == "Robert" assert model_dump(model) == {"foo": "hello!", "user": "Robert"} def test_aliases() -> None: class Model(BaseModel): my_field: int = Field(alias="myField") m = Model.construct(myField=1) assert m.my_field == 1 # mismatched types m = Model.construct(myField={"hello": False}) assert cast(Any, m.my_field) == {"hello": False} def test_repr() -> None: model = BasicModel(foo="bar") assert str(model) == "BasicModel(foo='bar')" assert repr(model) == "BasicModel(foo='bar')" def test_repr_nested_model() -> None: class Child(BaseModel): name: str age: int class Parent(BaseModel): name: str child: Child model = Parent(name="Robert", child=Child(name="Foo", age=5)) assert str(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" assert repr(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))" def test_optional_list() -> None: class Submodel(BaseModel): name: str class Model(BaseModel): items: Optional[List[Submodel]] m = Model.construct(items=None) assert m.items is None m = Model.construct(items=[]) assert m.items == [] m = Model.construct(items=[{"name": "Robert"}]) assert m.items is not None assert len(m.items) == 1 assert m.items[0].name == "Robert" def test_nested_union_of_models() -> None: class Submodel1(BaseModel): bar: bool class Submodel2(BaseModel): thing: str class Model(BaseModel): foo: Union[Submodel1, Submodel2] m = Model.construct(foo={"thing": "hello"}) assert isinstance(m.foo, Submodel2) assert m.foo.thing == "hello" def test_nested_union_of_mixed_types() -> None: class Submodel1(BaseModel): bar: bool class Model(BaseModel): foo: Union[Submodel1, Literal[True], Literal["CARD_HOLDER"]] m = Model.construct(foo=True) assert m.foo is True m = Model.construct(foo="CARD_HOLDER") assert m.foo is "CARD_HOLDER" m = Model.construct(foo={"bar": False}) assert isinstance(m.foo, Submodel1) assert m.foo.bar is False def test_nested_union_multiple_variants() -> None: class Submodel1(BaseModel): bar: bool class Submodel2(BaseModel): thing: str class Submodel3(BaseModel): foo: int class Model(BaseModel): foo: Union[Submodel1, Submodel2, None, Submodel3] m = Model.construct(foo={"thing": "hello"}) assert isinstance(m.foo, Submodel2) assert m.foo.thing == "hello" m = Model.construct(foo=None) assert m.foo is None m = Model.construct() assert m.foo is None m = Model.construct(foo={"foo": "1"}) assert isinstance(m.foo, Submodel3) assert m.foo.foo == 1 def test_nested_union_invalid_data() -> None: class Submodel1(BaseModel): level: int class Submodel2(BaseModel): name: str class Model(BaseModel): foo: Union[Submodel1, Submodel2] m = Model.construct(foo=True) assert cast(bool, m.foo) is True m = Model.construct(foo={"name": 3}) if PYDANTIC_V2: assert isinstance(m.foo, Submodel1) assert m.foo.name == 3 # type: ignore else: assert isinstance(m.foo, Submodel2) assert m.foo.name == "3" def test_list_of_unions() -> None: class Submodel1(BaseModel): level: int class Submodel2(BaseModel): name: str class Model(BaseModel): items: List[Union[Submodel1, Submodel2]] m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) assert len(m.items) == 2 assert isinstance(m.items[0], Submodel1) assert m.items[0].level == 1 assert isinstance(m.items[1], Submodel2) assert m.items[1].name == "Robert" m = Model.construct(items=[{"level": -1}, 156]) assert len(m.items) == 2 assert isinstance(m.items[0], Submodel1) assert m.items[0].level == -1 assert m.items[1] == 156 def test_union_of_lists() -> None: class SubModel1(BaseModel): level: int class 
SubModel2(BaseModel): name: str class Model(BaseModel): items: Union[List[SubModel1], List[SubModel2]] # with one valid entry m = Model.construct(items=[{"name": "Robert"}]) assert len(m.items) == 1 assert isinstance(m.items[0], SubModel2) assert m.items[0].name == "Robert" # with two entries pointing to different types m = Model.construct(items=[{"level": 1}, {"name": "Robert"}]) assert len(m.items) == 2 assert isinstance(m.items[0], SubModel1) assert m.items[0].level == 1 assert isinstance(m.items[1], SubModel1) assert cast(Any, m.items[1]).name == "Robert" # with two entries pointing to *completely* different types m = Model.construct(items=[{"level": -1}, 156]) assert len(m.items) == 2 assert isinstance(m.items[0], SubModel1) assert m.items[0].level == -1 assert m.items[1] == 156 def test_dict_of_union() -> None: class SubModel1(BaseModel): name: str class SubModel2(BaseModel): foo: str class Model(BaseModel): data: Dict[str, Union[SubModel1, SubModel2]] m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) assert len(list(m.data.keys())) == 2 assert isinstance(m.data["hello"], SubModel1) assert m.data["hello"].name == "there" assert isinstance(m.data["foo"], SubModel2) assert m.data["foo"].foo == "bar" # TODO: test mismatched type def test_double_nested_union() -> None: class SubModel1(BaseModel): name: str class SubModel2(BaseModel): bar: str class Model(BaseModel): data: Dict[str, List[Union[SubModel1, SubModel2]]] m = Model.construct(data={"foo": [{"bar": "baz"}, {"name": "Robert"}]}) assert len(m.data["foo"]) == 2 entry1 = m.data["foo"][0] assert isinstance(entry1, SubModel2) assert entry1.bar == "baz" entry2 = m.data["foo"][1] assert isinstance(entry2, SubModel1) assert entry2.name == "Robert" # TODO: test mismatched type def test_union_of_dict() -> None: class SubModel1(BaseModel): name: str class SubModel2(BaseModel): foo: str class Model(BaseModel): data: Union[Dict[str, SubModel1], Dict[str, SubModel2]] m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}}) assert len(list(m.data.keys())) == 2 assert isinstance(m.data["hello"], SubModel1) assert m.data["hello"].name == "there" assert isinstance(m.data["foo"], SubModel1) assert cast(Any, m.data["foo"]).foo == "bar" def test_iso8601_datetime() -> None: class Model(BaseModel): created_at: datetime expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc) if PYDANTIC_V2: expected_json = '{"created_at":"2019-12-27T18:11:19.117000Z"}' else: expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}' model = Model.construct(created_at="2019-12-27T18:11:19.117Z") assert model.created_at == expected assert model_json(model) == expected_json model = parse_obj(Model, dict(created_at="2019-12-27T18:11:19.117Z")) assert model.created_at == expected assert model_json(model) == expected_json def test_does_not_coerce_int() -> None: class Model(BaseModel): bar: int assert Model.construct(bar=1).bar == 1 assert Model.construct(bar=10.9).bar == 10.9 assert Model.construct(bar="19").bar == "19" # type: ignore[comparison-overlap] assert Model.construct(bar=False).bar is False def test_int_to_float_safe_conversion() -> None: class Model(BaseModel): float_field: float m = Model.construct(float_field=10) assert m.float_field == 10.0 assert isinstance(m.float_field, float) m = Model.construct(float_field=10.12) assert m.float_field == 10.12 assert isinstance(m.float_field, float) # number too big m = Model.construct(float_field=2**53 + 1) assert m.float_field == 2**53 + 1 assert 
isinstance(m.float_field, int) def test_deprecated_alias() -> None: class Model(BaseModel): resource_id: str = Field(alias="model_id") @property def model_id(self) -> str: return self.resource_id m = Model.construct(model_id="id") assert m.model_id == "id" assert m.resource_id == "id" assert m.resource_id is m.model_id m = parse_obj(Model, {"model_id": "id"}) assert m.model_id == "id" assert m.resource_id == "id" assert m.resource_id is m.model_id def test_omitted_fields() -> None: class Model(BaseModel): resource_id: Optional[str] = None m = Model.construct() assert "resource_id" not in m.model_fields_set m = Model.construct(resource_id=None) assert "resource_id" in m.model_fields_set m = Model.construct(resource_id="foo") assert "resource_id" in m.model_fields_set def test_forwards_compat_model_dump_method() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) m = Model(FOO="hello") assert m.model_dump() == {"foo": "hello"} assert m.model_dump(include={"bar"}) == {} assert m.model_dump(exclude={"foo"}) == {} assert m.model_dump(by_alias=True) == {"FOO": "hello"} m2 = Model() assert m2.model_dump() == {"foo": None} assert m2.model_dump(exclude_unset=True) == {} assert m2.model_dump(exclude_none=True) == {} assert m2.model_dump(exclude_defaults=True) == {} m3 = Model(FOO=None) assert m3.model_dump() == {"foo": None} assert m3.model_dump(exclude_none=True) == {} if not PYDANTIC_V2: with pytest.raises(ValueError, match="mode is only supported in Pydantic v2"): m.model_dump(mode="json") with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump(round_trip=True) with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.model_dump(warnings=False) def test_forwards_compat_model_dump_json_method() -> None: class Model(BaseModel): foo: Optional[str] = Field(alias="FOO", default=None) m = Model(FOO="hello") assert json.loads(m.model_dump_json()) == {"foo": "hello"} assert json.loads(m.model_dump_json(include={"bar"})) == {} assert json.loads(m.model_dump_json(include={"foo"})) == {"foo": "hello"} assert json.loads(m.model_dump_json(by_alias=True)) == {"FOO": "hello"} assert m.model_dump_json(indent=2) == '{\n "foo": "hello"\n}' m2 = Model() assert json.loads(m2.model_dump_json()) == {"foo": None} assert json.loads(m2.model_dump_json(exclude_unset=True)) == {} assert json.loads(m2.model_dump_json(exclude_none=True)) == {} assert json.loads(m2.model_dump_json(exclude_defaults=True)) == {} m3 = Model(FOO=None) assert json.loads(m3.model_dump_json()) == {"foo": None} assert json.loads(m3.model_dump_json(exclude_none=True)) == {} if not PYDANTIC_V2: with pytest.raises(ValueError, match="round_trip is only supported in Pydantic v2"): m.model_dump_json(round_trip=True) with pytest.raises(ValueError, match="warnings is only supported in Pydantic v2"): m.model_dump_json(warnings=False) def test_type_compat() -> None: # our model type can be assigned to Pydantic's model type def takes_pydantic(model: pydantic.BaseModel) -> None: # noqa: ARG001 ... class OurModel(BaseModel): foo: Optional[str] = None takes_pydantic(OurModel()) openai-python-1.12.0/tests/test_module_client.py000066400000000000000000000125151456126711000217720ustar00rootroot00000000000000# File generated from our OpenAPI spec by Stainless. 
from __future__ import annotations import os as _os import httpx import pytest from httpx import URL import openai from openai import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES def reset_state() -> None: openai._reset_client() openai.api_key = None or "My API Key" openai.organization = None openai.base_url = None openai.timeout = DEFAULT_TIMEOUT openai.max_retries = DEFAULT_MAX_RETRIES openai.default_headers = None openai.default_query = None openai.http_client = None openai.api_type = _os.environ.get("OPENAI_API_TYPE") # type: ignore openai.api_version = None openai.azure_endpoint = None openai.azure_ad_token = None openai.azure_ad_token_provider = None @pytest.fixture(autouse=True) def reset_state_fixture() -> None: reset_state() def test_base_url_option() -> None: assert openai.base_url is None assert openai.completions._client.base_url == URL("https://api.openai.com/v1/") openai.base_url = "http://foo.com" assert openai.base_url == URL("http://foo.com") assert openai.completions._client.base_url == URL("http://foo.com") def test_timeout_option() -> None: assert openai.timeout == openai.DEFAULT_TIMEOUT assert openai.completions._client.timeout == openai.DEFAULT_TIMEOUT openai.timeout = 3 assert openai.timeout == 3 assert openai.completions._client.timeout == 3 def test_max_retries_option() -> None: assert openai.max_retries == openai.DEFAULT_MAX_RETRIES assert openai.completions._client.max_retries == openai.DEFAULT_MAX_RETRIES openai.max_retries = 1 assert openai.max_retries == 1 assert openai.completions._client.max_retries == 1 def test_default_headers_option() -> None: assert openai.default_headers == None openai.default_headers = {"Foo": "Bar"} assert openai.default_headers["Foo"] == "Bar" assert openai.completions._client.default_headers["Foo"] == "Bar" def test_default_query_option() -> None: assert openai.default_query is None assert openai.completions._client._custom_query == {} openai.default_query = {"Foo": {"nested": 1}} assert openai.default_query["Foo"] == {"nested": 1} assert openai.completions._client._custom_query["Foo"] == {"nested": 1} def test_http_client_option() -> None: assert openai.http_client is None original_http_client = openai.completions._client._client assert original_http_client is not None new_client = httpx.Client() openai.http_client = new_client assert openai.completions._client._client is new_client import contextlib from typing import Iterator from openai.lib.azure import AzureOpenAI @contextlib.contextmanager def fresh_env() -> Iterator[None]: old = _os.environ.copy() try: _os.environ.clear() yield finally: _os.environ.update(old) def test_only_api_key_results_in_openai_api() -> None: with fresh_env(): openai.api_type = None openai.api_key = "example API key" assert type(openai.completions._client).__name__ == "_ModuleClient" def test_azure_api_key_env_without_api_version() -> None: with fresh_env(): openai.api_type = None _os.environ["AZURE_OPENAI_API_KEY"] = "example API key" with pytest.raises( ValueError, match=r"Must provide either the `api_version` argument or the `OPENAI_API_VERSION` environment variable", ): openai.completions._client # noqa: B018 def test_azure_api_key_and_version_env() -> None: with fresh_env(): openai.api_type = None _os.environ["AZURE_OPENAI_API_KEY"] = "example API key" _os.environ["OPENAI_API_VERSION"] = "example-version" with pytest.raises( ValueError, match=r"Must provide one of the `base_url` or `azure_endpoint` arguments, or the `AZURE_OPENAI_ENDPOINT` environment variable", ): openai.completions._client # noqa: B018 
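# Hedged illustrative sketch, not part of the generated suite: `fresh_env()` above
# snapshots os.environ, clears it for the duration of the block, and restores the
# snapshot afterwards, which is why each Azure test here starts from a clean slate.
# The sentinel variable name is invented for this example only.
def test_fresh_env_restores_prior_values_sketch() -> None:
    _os.environ["EXAMPLE_SENTINEL"] = "before"
    try:
        with fresh_env():
            assert "EXAMPLE_SENTINEL" not in _os.environ
            _os.environ["EXAMPLE_SENTINEL"] = "inside"
        assert _os.environ["EXAMPLE_SENTINEL"] == "before"
    finally:
        _os.environ.pop("EXAMPLE_SENTINEL", None)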
def test_azure_api_key_version_and_endpoint_env() -> None: with fresh_env(): openai.api_type = None _os.environ["AZURE_OPENAI_API_KEY"] = "example API key" _os.environ["OPENAI_API_VERSION"] = "example-version" _os.environ["AZURE_OPENAI_ENDPOINT"] = "https://www.example" openai.completions._client # noqa: B018 assert openai.api_type == "azure" def test_azure_azure_ad_token_version_and_endpoint_env() -> None: with fresh_env(): openai.api_type = None _os.environ["AZURE_OPENAI_AD_TOKEN"] = "example AD token" _os.environ["OPENAI_API_VERSION"] = "example-version" _os.environ["AZURE_OPENAI_ENDPOINT"] = "https://www.example" client = openai.completions._client assert isinstance(client, AzureOpenAI) assert client._azure_ad_token == "example AD token" def test_azure_azure_ad_token_provider_version_and_endpoint_env() -> None: with fresh_env(): openai.api_type = None _os.environ["OPENAI_API_VERSION"] = "example-version" _os.environ["AZURE_OPENAI_ENDPOINT"] = "https://www.example" openai.azure_ad_token_provider = lambda: "token" client = openai.completions._client assert isinstance(client, AzureOpenAI) assert client._azure_ad_token_provider is not None assert client._azure_ad_token_provider() == "token" openai-python-1.12.0/tests/test_qs.py000066400000000000000000000062001456126711000175640ustar00rootroot00000000000000from typing import Any, cast from functools import partial from urllib.parse import unquote import pytest from openai._qs import Querystring, stringify def test_empty() -> None: assert stringify({}) == "" assert stringify({"a": {}}) == "" assert stringify({"a": {"b": {"c": {}}}}) == "" def test_basic() -> None: assert stringify({"a": 1}) == "a=1" assert stringify({"a": "b"}) == "a=b" assert stringify({"a": True}) == "a=true" assert stringify({"a": False}) == "a=false" assert stringify({"a": 1.23456}) == "a=1.23456" assert stringify({"a": None}) == "" @pytest.mark.parametrize("method", ["class", "function"]) def test_nested_dotted(method: str) -> None: if method == "class": serialise = Querystring(nested_format="dots").stringify else: serialise = partial(stringify, nested_format="dots") assert unquote(serialise({"a": {"b": "c"}})) == "a.b=c" assert unquote(serialise({"a": {"b": "c", "d": "e", "f": "g"}})) == "a.b=c&a.d=e&a.f=g" assert unquote(serialise({"a": {"b": {"c": {"d": "e"}}}})) == "a.b.c.d=e" assert unquote(serialise({"a": {"b": True}})) == "a.b=true" def test_nested_brackets() -> None: assert unquote(stringify({"a": {"b": "c"}})) == "a[b]=c" assert unquote(stringify({"a": {"b": "c", "d": "e", "f": "g"}})) == "a[b]=c&a[d]=e&a[f]=g" assert unquote(stringify({"a": {"b": {"c": {"d": "e"}}}})) == "a[b][c][d]=e" assert unquote(stringify({"a": {"b": True}})) == "a[b]=true" @pytest.mark.parametrize("method", ["class", "function"]) def test_array_comma(method: str) -> None: if method == "class": serialise = Querystring(array_format="comma").stringify else: serialise = partial(stringify, array_format="comma") assert unquote(serialise({"in": ["foo", "bar"]})) == "in=foo,bar" assert unquote(serialise({"a": {"b": [True, False]}})) == "a[b]=true,false" assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b]=true,false,true" def test_array_repeat() -> None: assert unquote(stringify({"in": ["foo", "bar"]})) == "in=foo&in=bar" assert unquote(stringify({"a": {"b": [True, False]}})) == "a[b]=true&a[b]=false" assert unquote(stringify({"a": {"b": [True, False, None, True]}})) == "a[b]=true&a[b]=false&a[b]=true" assert unquote(stringify({"in": ["foo", {"b": {"c": ["d", "e"]}}]})) == 
"in=foo&in[b][c]=d&in[b][c]=e" @pytest.mark.parametrize("method", ["class", "function"]) def test_array_brackets(method: str) -> None: if method == "class": serialise = Querystring(array_format="brackets").stringify else: serialise = partial(stringify, array_format="brackets") assert unquote(serialise({"in": ["foo", "bar"]})) == "in[]=foo&in[]=bar" assert unquote(serialise({"a": {"b": [True, False]}})) == "a[b][]=true&a[b][]=false" assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b][]=true&a[b][]=false&a[b][]=true" def test_unknown_array_format() -> None: with pytest.raises(NotImplementedError, match="Unknown array_format value: foo, choose from comma, repeat"): stringify({"a": ["foo", "bar"]}, array_format=cast(Any, "foo")) openai-python-1.12.0/tests/test_required_args.py000066400000000000000000000057611456126711000220100ustar00rootroot00000000000000from __future__ import annotations import pytest from openai._utils import required_args def test_too_many_positional_params() -> None: @required_args(["a"]) def foo(a: str | None = None) -> str | None: return a with pytest.raises(TypeError, match=r"foo\(\) takes 1 argument\(s\) but 2 were given"): foo("a", "b") # type: ignore def test_positional_param() -> None: @required_args(["a"]) def foo(a: str | None = None) -> str | None: return a assert foo("a") == "a" assert foo(None) is None assert foo(a="b") == "b" with pytest.raises(TypeError, match="Missing required argument: 'a'"): foo() def test_keyword_only_param() -> None: @required_args(["a"]) def foo(*, a: str | None = None) -> str | None: return a assert foo(a="a") == "a" assert foo(a=None) is None assert foo(a="b") == "b" with pytest.raises(TypeError, match="Missing required argument: 'a'"): foo() def test_multiple_params() -> None: @required_args(["a", "b", "c"]) def foo(a: str = "", *, b: str = "", c: str = "") -> str | None: return f"{a} {b} {c}" assert foo(a="a", b="b", c="c") == "a b c" error_message = r"Missing required arguments.*" with pytest.raises(TypeError, match=error_message): foo() with pytest.raises(TypeError, match=error_message): foo(a="a") with pytest.raises(TypeError, match=error_message): foo(b="b") with pytest.raises(TypeError, match=error_message): foo(c="c") with pytest.raises(TypeError, match=r"Missing required argument: 'a'"): foo(b="a", c="c") with pytest.raises(TypeError, match=r"Missing required argument: 'b'"): foo("a", c="c") def test_multiple_variants() -> None: @required_args(["a"], ["b"]) def foo(*, a: str | None = None, b: str | None = None) -> str | None: return a if a is not None else b assert foo(a="foo") == "foo" assert foo(b="bar") == "bar" assert foo(a=None) is None assert foo(b=None) is None # TODO: this error message could probably be improved with pytest.raises( TypeError, match=r"Missing required arguments; Expected either \('a'\) or \('b'\) arguments to be given", ): foo() def test_multiple_params_multiple_variants() -> None: @required_args(["a", "b"], ["c"]) def foo(*, a: str | None = None, b: str | None = None, c: str | None = None) -> str | None: if a is not None: return a if b is not None: return b return c error_message = r"Missing required arguments; Expected either \('a' and 'b'\) or \('c'\) arguments to be given" with pytest.raises(TypeError, match=error_message): foo(a="foo") with pytest.raises(TypeError, match=error_message): foo(b="bar") with pytest.raises(TypeError, match=error_message): foo() assert foo(a=None, b="bar") == "bar" assert foo(c=None) is None assert foo(c="foo") == "foo" 
openai-python-1.12.0/tests/test_response.py000066400000000000000000000107341456126711000210060ustar00rootroot00000000000000import json from typing import List import httpx import pytest import pydantic from openai import OpenAI, BaseModel, AsyncOpenAI from openai._response import ( APIResponse, BaseAPIResponse, AsyncAPIResponse, BinaryAPIResponse, AsyncBinaryAPIResponse, extract_response_type, ) from openai._streaming import Stream from openai._base_client import FinalRequestOptions class ConcreteBaseAPIResponse(APIResponse[bytes]): ... class ConcreteAPIResponse(APIResponse[List[str]]): ... class ConcreteAsyncAPIResponse(APIResponse[httpx.Response]): ... def test_extract_response_type_direct_classes() -> None: assert extract_response_type(BaseAPIResponse[str]) == str assert extract_response_type(APIResponse[str]) == str assert extract_response_type(AsyncAPIResponse[str]) == str def test_extract_response_type_direct_class_missing_type_arg() -> None: with pytest.raises( RuntimeError, match="Expected type to have a type argument at index 0 but it did not", ): extract_response_type(AsyncAPIResponse) def test_extract_response_type_concrete_subclasses() -> None: assert extract_response_type(ConcreteBaseAPIResponse) == bytes assert extract_response_type(ConcreteAPIResponse) == List[str] assert extract_response_type(ConcreteAsyncAPIResponse) == httpx.Response def test_extract_response_type_binary_response() -> None: assert extract_response_type(BinaryAPIResponse) == bytes assert extract_response_type(AsyncBinaryAPIResponse) == bytes class PydanticModel(pydantic.BaseModel): ... def test_response_parse_mismatched_basemodel(client: OpenAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) with pytest.raises( TypeError, match="Pydantic models must subclass our base model type, e.g. `from openai import BaseModel`", ): response.parse(to=PydanticModel) @pytest.mark.asyncio async def test_async_response_parse_mismatched_basemodel(async_client: AsyncOpenAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) with pytest.raises( TypeError, match="Pydantic models must subclass our base model type, e.g. 
`from openai import BaseModel`", ): await response.parse(to=PydanticModel) def test_response_parse_custom_stream(client: OpenAI) -> None: response = APIResponse( raw=httpx.Response(200, content=b"foo"), client=client, stream=True, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) stream = response.parse(to=Stream[int]) assert stream._cast_to == int @pytest.mark.asyncio async def test_async_response_parse_custom_stream(async_client: AsyncOpenAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=b"foo"), client=async_client, stream=True, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) stream = await response.parse(to=Stream[int]) assert stream._cast_to == int class CustomModel(BaseModel): foo: str bar: int def test_response_parse_custom_model(client: OpenAI) -> None: response = APIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) obj = response.parse(to=CustomModel) assert obj.foo == "hello!" assert obj.bar == 2 @pytest.mark.asyncio async def test_async_response_parse_custom_model(async_client: AsyncOpenAI) -> None: response = AsyncAPIResponse( raw=httpx.Response(200, content=json.dumps({"foo": "hello!", "bar": 2})), client=async_client, stream=False, stream_cls=None, cast_to=str, options=FinalRequestOptions.construct(method="get", url="/foo"), ) obj = await response.parse(to=CustomModel) assert obj.foo == "hello!" assert obj.bar == 2 openai-python-1.12.0/tests/test_streaming.py000066400000000000000000000043761456126711000211460ustar00rootroot00000000000000from typing import Iterator, AsyncIterator import pytest from openai._streaming import SSEDecoder @pytest.mark.asyncio async def test_basic_async() -> None: async def body() -> AsyncIterator[str]: yield "event: completion" yield 'data: {"foo":true}' yield "" async for sse in SSEDecoder().aiter(body()): assert sse.event == "completion" assert sse.json() == {"foo": True} def test_basic() -> None: def body() -> Iterator[str]: yield "event: completion" yield 'data: {"foo":true}' yield "" it = SSEDecoder().iter(body()) sse = next(it) assert sse.event == "completion" assert sse.json() == {"foo": True} with pytest.raises(StopIteration): next(it) def test_data_missing_event() -> None: def body() -> Iterator[str]: yield 'data: {"foo":true}' yield "" it = SSEDecoder().iter(body()) sse = next(it) assert sse.event is None assert sse.json() == {"foo": True} with pytest.raises(StopIteration): next(it) def test_event_missing_data() -> None: def body() -> Iterator[str]: yield "event: ping" yield "" it = SSEDecoder().iter(body()) sse = next(it) assert sse.event == "ping" assert sse.data == "" with pytest.raises(StopIteration): next(it) def test_multiple_events() -> None: def body() -> Iterator[str]: yield "event: ping" yield "" yield "event: completion" yield "" it = SSEDecoder().iter(body()) sse = next(it) assert sse.event == "ping" assert sse.data == "" sse = next(it) assert sse.event == "completion" assert sse.data == "" with pytest.raises(StopIteration): next(it) def test_multiple_events_with_data() -> None: def body() -> Iterator[str]: yield "event: ping" yield 'data: {"foo":true}' yield "" yield "event: completion" yield 'data: {"bar":false}' yield "" it = SSEDecoder().iter(body()) sse = next(it) assert sse.event == "ping" assert sse.json() == {"foo": True} sse = next(it) assert 
sse.event == "completion" assert sse.json() == {"bar": False} with pytest.raises(StopIteration): next(it) openai-python-1.12.0/tests/test_transform.py000066400000000000000000000233151456126711000211620ustar00rootroot00000000000000from __future__ import annotations from typing import Any, List, Union, Iterable, Optional, cast from datetime import date, datetime from typing_extensions import Required, Annotated, TypedDict import pytest from openai._utils import PropertyInfo, transform, parse_datetime from openai._compat import PYDANTIC_V2 from openai._models import BaseModel class Foo1(TypedDict): foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] def test_top_level_alias() -> None: assert transform({"foo_bar": "hello"}, expected_type=Foo1) == {"fooBar": "hello"} class Foo2(TypedDict): bar: Bar2 class Bar2(TypedDict): this_thing: Annotated[int, PropertyInfo(alias="this__thing")] baz: Annotated[Baz2, PropertyInfo(alias="Baz")] class Baz2(TypedDict): my_baz: Annotated[str, PropertyInfo(alias="myBaz")] def test_recursive_typeddict() -> None: assert transform({"bar": {"this_thing": 1}}, Foo2) == {"bar": {"this__thing": 1}} assert transform({"bar": {"baz": {"my_baz": "foo"}}}, Foo2) == {"bar": {"Baz": {"myBaz": "foo"}}} class Foo3(TypedDict): things: List[Bar3] class Bar3(TypedDict): my_field: Annotated[str, PropertyInfo(alias="myField")] def test_list_of_typeddict() -> None: result = transform({"things": [{"my_field": "foo"}, {"my_field": "foo2"}]}, expected_type=Foo3) assert result == {"things": [{"myField": "foo"}, {"myField": "foo2"}]} class Foo4(TypedDict): foo: Union[Bar4, Baz4] class Bar4(TypedDict): foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] class Baz4(TypedDict): foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] def test_union_of_typeddict() -> None: assert transform({"foo": {"foo_bar": "bar"}}, Foo4) == {"foo": {"fooBar": "bar"}} assert transform({"foo": {"foo_baz": "baz"}}, Foo4) == {"foo": {"fooBaz": "baz"}} assert transform({"foo": {"foo_baz": "baz", "foo_bar": "bar"}}, Foo4) == {"foo": {"fooBaz": "baz", "fooBar": "bar"}} class Foo5(TypedDict): foo: Annotated[Union[Bar4, List[Baz4]], PropertyInfo(alias="FOO")] class Bar5(TypedDict): foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] class Baz5(TypedDict): foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] def test_union_of_list() -> None: assert transform({"foo": {"foo_bar": "bar"}}, Foo5) == {"FOO": {"fooBar": "bar"}} assert transform( { "foo": [ {"foo_baz": "baz"}, {"foo_baz": "baz"}, ] }, Foo5, ) == {"FOO": [{"fooBaz": "baz"}, {"fooBaz": "baz"}]} class Foo6(TypedDict): bar: Annotated[str, PropertyInfo(alias="Bar")] def test_includes_unknown_keys() -> None: assert transform({"bar": "bar", "baz_": {"FOO": 1}}, Foo6) == { "Bar": "bar", "baz_": {"FOO": 1}, } class Foo7(TypedDict): bar: Annotated[List[Bar7], PropertyInfo(alias="bAr")] foo: Bar7 class Bar7(TypedDict): foo: str def test_ignores_invalid_input() -> None: assert transform({"bar": ""}, Foo7) == {"bAr": ""} assert transform({"foo": ""}, Foo7) == {"foo": ""} class DatetimeDict(TypedDict, total=False): foo: Annotated[datetime, PropertyInfo(format="iso8601")] bar: Annotated[Optional[datetime], PropertyInfo(format="iso8601")] required: Required[Annotated[Optional[datetime], PropertyInfo(format="iso8601")]] list_: Required[Annotated[Optional[List[datetime]], PropertyInfo(format="iso8601")]] union: Annotated[Union[int, datetime], PropertyInfo(format="iso8601")] class DateDict(TypedDict, total=False): foo: Annotated[date, PropertyInfo(format="iso8601")] def 
test_iso8601_format() -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") assert transform({"foo": dt}, DatetimeDict) == {"foo": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] dt = dt.replace(tzinfo=None) assert transform({"foo": dt}, DatetimeDict) == {"foo": "2023-02-23T14:16:36.337692"} # type: ignore[comparison-overlap] assert transform({"foo": None}, DateDict) == {"foo": None} # type: ignore[comparison-overlap] assert transform({"foo": date.fromisoformat("2023-02-23")}, DateDict) == {"foo": "2023-02-23"} # type: ignore[comparison-overlap] def test_optional_iso8601_format() -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") assert transform({"bar": dt}, DatetimeDict) == {"bar": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] assert transform({"bar": None}, DatetimeDict) == {"bar": None} def test_required_iso8601_format() -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") assert transform({"required": dt}, DatetimeDict) == {"required": "2023-02-23T14:16:36.337692+00:00"} # type: ignore[comparison-overlap] assert transform({"required": None}, DatetimeDict) == {"required": None} def test_union_datetime() -> None: dt = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") assert transform({"union": dt}, DatetimeDict) == { # type: ignore[comparison-overlap] "union": "2023-02-23T14:16:36.337692+00:00" } assert transform({"union": "foo"}, DatetimeDict) == {"union": "foo"} def test_nested_list_iso6801_format() -> None: dt1 = datetime.fromisoformat("2023-02-23T14:16:36.337692+00:00") dt2 = parse_datetime("2022-01-15T06:34:23Z") assert transform({"list_": [dt1, dt2]}, DatetimeDict) == { # type: ignore[comparison-overlap] "list_": ["2023-02-23T14:16:36.337692+00:00", "2022-01-15T06:34:23+00:00"] } def test_datetime_custom_format() -> None: dt = parse_datetime("2022-01-15T06:34:23Z") result = transform(dt, Annotated[datetime, PropertyInfo(format="custom", format_template="%H")]) assert result == "06" # type: ignore[comparison-overlap] class DateDictWithRequiredAlias(TypedDict, total=False): required_prop: Required[Annotated[date, PropertyInfo(format="iso8601", alias="prop")]] def test_datetime_with_alias() -> None: assert transform({"required_prop": None}, DateDictWithRequiredAlias) == {"prop": None} # type: ignore[comparison-overlap] assert transform({"required_prop": date.fromisoformat("2023-02-23")}, DateDictWithRequiredAlias) == { "prop": "2023-02-23" } # type: ignore[comparison-overlap] class MyModel(BaseModel): foo: str def test_pydantic_model_to_dictionary() -> None: assert transform(MyModel(foo="hi!"), Any) == {"foo": "hi!"} assert transform(MyModel.construct(foo="hi!"), Any) == {"foo": "hi!"} def test_pydantic_empty_model() -> None: assert transform(MyModel.construct(), Any) == {} def test_pydantic_unknown_field() -> None: assert transform(MyModel.construct(my_untyped_field=True), Any) == {"my_untyped_field": True} def test_pydantic_mismatched_types() -> None: model = MyModel.construct(foo=True) if PYDANTIC_V2: with pytest.warns(UserWarning): params = transform(model, Any) else: params = transform(model, Any) assert params == {"foo": True} def test_pydantic_mismatched_object_type() -> None: model = MyModel.construct(foo=MyModel.construct(hello="world")) if PYDANTIC_V2: with pytest.warns(UserWarning): params = transform(model, Any) else: params = transform(model, Any) assert params == {"foo": {"hello": "world"}} class ModelNestedObjects(BaseModel): nested: MyModel def 
test_pydantic_nested_objects() -> None: model = ModelNestedObjects.construct(nested={"foo": "stainless"}) assert isinstance(model.nested, MyModel) assert transform(model, Any) == {"nested": {"foo": "stainless"}} class ModelWithDefaultField(BaseModel): foo: str with_none_default: Union[str, None] = None with_str_default: str = "foo" def test_pydantic_default_field() -> None: # should be excluded when defaults are used model = ModelWithDefaultField.construct() assert model.with_none_default is None assert model.with_str_default == "foo" assert transform(model, Any) == {} # should be included when the default value is explicitly given model = ModelWithDefaultField.construct(with_none_default=None, with_str_default="foo") assert model.with_none_default is None assert model.with_str_default == "foo" assert transform(model, Any) == {"with_none_default": None, "with_str_default": "foo"} # should be included when a non-default value is explicitly given model = ModelWithDefaultField.construct(with_none_default="bar", with_str_default="baz") assert model.with_none_default == "bar" assert model.with_str_default == "baz" assert transform(model, Any) == {"with_none_default": "bar", "with_str_default": "baz"} class TypedDictIterableUnion(TypedDict): foo: Annotated[Union[Bar8, Iterable[Baz8]], PropertyInfo(alias="FOO")] class Bar8(TypedDict): foo_bar: Annotated[str, PropertyInfo(alias="fooBar")] class Baz8(TypedDict): foo_baz: Annotated[str, PropertyInfo(alias="fooBaz")] def test_iterable_of_dictionaries() -> None: assert transform({"foo": [{"foo_baz": "bar"}]}, TypedDictIterableUnion) == {"FOO": [{"fooBaz": "bar"}]} assert cast(Any, transform({"foo": ({"foo_baz": "bar"},)}, TypedDictIterableUnion)) == {"FOO": [{"fooBaz": "bar"}]} def my_iter() -> Iterable[Baz8]: yield {"foo_baz": "hello"} yield {"foo_baz": "world"} assert transform({"foo": my_iter()}, TypedDictIterableUnion) == {"FOO": [{"fooBaz": "hello"}, {"fooBaz": "world"}]} class TypedDictIterableUnionStr(TypedDict): foo: Annotated[Union[str, Iterable[Baz8]], PropertyInfo(alias="FOO")] def test_iterable_union_str() -> None: assert transform({"foo": "bar"}, TypedDictIterableUnionStr) == {"FOO": "bar"} assert cast(Any, transform(iter([{"foo_baz": "bar"}]), Union[str, Iterable[Baz8]])) == [{"fooBaz": "bar"}] openai-python-1.12.0/tests/test_utils/000077500000000000000000000000001456126711000177315ustar00rootroot00000000000000openai-python-1.12.0/tests/test_utils/test_proxy.py000066400000000000000000000012571456126711000225300ustar00rootroot00000000000000import operator from typing import Any from typing_extensions import override from openai._utils import LazyProxy class RecursiveLazyProxy(LazyProxy[Any]): @override def __load__(self) -> Any: return self def __call__(self, *_args: Any, **_kwds: Any) -> Any: raise RuntimeError("This should never be called!") def test_recursive_proxy() -> None: proxy = RecursiveLazyProxy() assert repr(proxy) == "RecursiveLazyProxy" assert str(proxy) == "RecursiveLazyProxy" assert dir(proxy) == [] assert type(proxy).__name__ == "RecursiveLazyProxy" assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy" openai-python-1.12.0/tests/test_utils/test_typing.py000066400000000000000000000046551456126711000226660ustar00rootroot00000000000000from __future__ import annotations from typing import Generic, TypeVar, cast from openai._utils import extract_type_var_from_base _T = TypeVar("_T") _T2 = TypeVar("_T2") _T3 = TypeVar("_T3") class BaseGeneric(Generic[_T]): ... 
class SubclassGeneric(BaseGeneric[_T]): ... class BaseGenericMultipleTypeArgs(Generic[_T, _T2, _T3]): ... class SubclassGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ... class SubclassDifferentOrderGenericMultipleTypeArgs(BaseGenericMultipleTypeArgs[_T2, _T, _T3]): ... def test_extract_type_var() -> None: assert ( extract_type_var_from_base( BaseGeneric[int], index=0, generic_bases=cast("tuple[type, ...]", (BaseGeneric,)), ) == int ) def test_extract_type_var_generic_subclass() -> None: assert ( extract_type_var_from_base( SubclassGeneric[int], index=0, generic_bases=cast("tuple[type, ...]", (BaseGeneric,)), ) == int ) def test_extract_type_var_multiple() -> None: typ = BaseGenericMultipleTypeArgs[int, str, None] generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) def test_extract_type_var_generic_subclass_multiple() -> None: typ = SubclassGenericMultipleTypeArgs[int, str, None] generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) def test_extract_type_var_generic_subclass_different_ordering_multiple() -> None: typ = SubclassDifferentOrderGenericMultipleTypeArgs[int, str, None] generic_bases = cast("tuple[type, ...]", (BaseGenericMultipleTypeArgs,)) assert extract_type_var_from_base(typ, index=0, generic_bases=generic_bases) == int assert extract_type_var_from_base(typ, index=1, generic_bases=generic_bases) == str assert extract_type_var_from_base(typ, index=2, generic_bases=generic_bases) == type(None) openai-python-1.12.0/tests/utils.py000066400000000000000000000074121456126711000172500ustar00rootroot00000000000000from __future__ import annotations import os import inspect import traceback import contextlib from typing import Any, TypeVar, Iterator, cast from datetime import date, datetime from typing_extensions import Literal, get_args, get_origin, assert_type from openai._types import NoneType from openai._utils import ( is_dict, is_list, is_list_type, is_union_type, ) from openai._compat import PYDANTIC_V2, field_outer_type, get_model_fields from openai._models import BaseModel BaseModelT = TypeVar("BaseModelT", bound=BaseModel) def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool: for name, field in get_model_fields(model).items(): field_value = getattr(value, name) if PYDANTIC_V2: allow_none = False else: # in v1 nullability was structured differently # https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields allow_none = getattr(field, "allow_none", False) assert_matches_type( field_outer_type(field), field_value, path=[*path, name], allow_none=allow_none, ) return True # Note: the `path` argument is only used to improve error messages when `--showlocals` is used def assert_matches_type( type_: Any, value: object, *, path: list[str], allow_none: bool = False, ) -> None: if allow_none and value is None: return if type_ is None or type_ is NoneType: assert value is None return origin = get_origin(type_) or type_ if is_list_type(type_): return 
_assert_list_type(type_, value) if origin == str: assert isinstance(value, str) elif origin == int: assert isinstance(value, int) elif origin == bool: assert isinstance(value, bool) elif origin == float: assert isinstance(value, float) elif origin == bytes: assert isinstance(value, bytes) elif origin == datetime: assert isinstance(value, datetime) elif origin == date: assert isinstance(value, date) elif origin == object: # nothing to do here, the expected type is unknown pass elif origin == Literal: assert value in get_args(type_) elif origin == dict: assert is_dict(value) args = get_args(type_) key_type = args[0] items_type = args[1] for key, item in value.items(): assert_matches_type(key_type, key, path=[*path, ""]) assert_matches_type(items_type, item, path=[*path, ""]) elif is_union_type(type_): for i, variant in enumerate(get_args(type_)): try: assert_matches_type(variant, value, path=[*path, f"variant {i}"]) return except AssertionError: traceback.print_exc() continue raise AssertionError("Did not match any variants") elif issubclass(origin, BaseModel): assert isinstance(value, type_) assert assert_matches_model(type_, cast(Any, value), path=path) elif inspect.isclass(origin) and origin.__name__ == "HttpxBinaryResponseContent": assert value.__class__.__name__ == "HttpxBinaryResponseContent" else: assert None, f"Unhandled field type: {type_}" def _assert_list_type(type_: type[object], value: object) -> None: assert is_list(value) inner_type = get_args(type_)[0] for entry in value: assert_type(inner_type, entry) # type: ignore @contextlib.contextmanager def update_env(**new_env: str) -> Iterator[None]: old = os.environ.copy() try: os.environ.update(new_env) yield None finally: os.environ.clear() os.environ.update(old)
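# Hedged illustrative sketch, not part of the original helpers: a minimal example of
# how assert_matches_model and assert_matches_type are meant to be driven. The model
# and values below are invented for this example only.
class _ExampleModel(BaseModel):
    name: str
    rank: int


def _example_matches_usage() -> None:
    value = _ExampleModel.construct(name="demo", rank=3)
    assert assert_matches_model(_ExampleModel, value, path=["_ExampleModel"])
    assert_matches_type(str, value.name, path=["name"])
    assert_matches_type(int, value.rank, path=["rank"])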